diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 36df1617ff..65a0c1bd9a 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -223,6 +223,11 @@ "sideEffects": { "description": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun. Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "type": "string" + }, + "timeoutSeconds": { + "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Defaults to 30 seconds.", + "format": "int32", + "type": "integer" + } }, "required": [ diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile b/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile index 58847372c9..bc53ec0226 100644 --- a/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile @@ -17,7 +17,7 @@ COPY elasticsearch_logging_discovery.go go.mod go.sum / RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -ldflags "-w" -o /elasticsearch_logging_discovery /elasticsearch_logging_discovery.go -FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.2 +FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.1 VOLUME ["/data"] EXPOSE 9200 9300 diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Makefile b/cluster/addons/fluentd-elasticsearch/es-image/Makefile index e2b8e68757..b3e154b16a 100755 --- a/cluster/addons/fluentd-elasticsearch/es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Makefile @@ -16,7 +16,7 @@ PREFIX = gcr.io/fluentd-elasticsearch IMAGE = elasticsearch -TAG = v6.3.0 +TAG = v6.6.1 build: - gcloud builds submit --tag ${PREFIX}/${IMAGE}:${TAG} \ No newline at end of file + gcloud builds submit --tag ${PREFIX}/${IMAGE}:${TAG} diff --git a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml index b92bb1b2e3..7964aae8db 100644 --- a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml @@ -54,7 +54,7 @@ metadata: namespace: kube-system labels: k8s-app: elasticsearch-logging - version: v6.3.0 + version: v6.6.1 kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: @@ -63,17 +63,17 @@ spec: selector: matchLabels: k8s-app: elasticsearch-logging - version: v6.3.0 + version: v6.6.1 template: metadata: labels: k8s-app: elasticsearch-logging - version: v6.3.0 + version: v6.6.1 kubernetes.io/cluster-service: "true" spec: serviceAccountName: elasticsearch-logging containers: - - image: k8s.gcr.io/elasticsearch:v6.3.0 + - image: gcr.io/fluentd-elasticsearch/elasticsearch:v6.6.1 name: elasticsearch-logging resources: # need more cpu upon initialization, therefore burstable class diff --git a/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml b/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml index 1c0efd9d96..67f541b3ae 100644 --- a/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml +++ 
b/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml @@ -21,7 +21,7 @@ spec: spec: containers: - name: kibana-logging - image: docker.elastic.co/kibana/kibana-oss:6.3.2 + image: docker.elastic.co/kibana/kibana-oss:6.6.1 resources: # need more cpu upon initialization, therefore burstable class limits: diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index c05de8f887..5c195fb086 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -66,19 +66,19 @@ spec: - '-c' - > LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300}; - STUCK_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-900}; + STUCK_THRESHOLD_SECONDS=${STUCK_THRESHOLD_SECONDS:-900}; if [ ! -e /var/log/fluentd-buffers ]; then exit 1; fi; touch -d "${STUCK_THRESHOLD_SECONDS} seconds ago" /tmp/marker-stuck; - if [[ -z "$(find /var/log/fluentd-buffers -type f -newer /tmp/marker-stuck -print -quit)" ]]; + if [ -z "$(find /var/log/fluentd-buffers -type d -newer /tmp/marker-stuck -print -quit)" ]; then rm -rf /var/log/fluentd-buffers; exit 1; fi; touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness; - if [[ -z "$(find /var/log/fluentd-buffers -type f -newer /tmp/marker-liveness -print -quit)" ]]; + if [ -z "$(find /var/log/fluentd-buffers -type d -newer /tmp/marker-liveness -print -quit)" ]; then exit 1; fi; diff --git a/cluster/gce/upgrade.sh b/cluster/gce/upgrade.sh index 10b79394fa..b346080816 100755 --- a/cluster/gce/upgrade.sh +++ b/cluster/gce/upgrade.sh @@ -267,7 +267,7 @@ function prepare-node-upgrade() { # TODO(zmerlynn): Get configure-vm script from ${version}. (Must plumb this # through all create-linux-node-instance-template implementations). - local template_name=$(get-template-name-from-version ${SANITIZED_VERSION}) + local template_name=$(get-template-name-from-version ${SANITIZED_VERSION} ${NODE_INSTANCE_PREFIX}) create-linux-node-instance-template "${template_name}" # The following is echo'd so that callers can get the template name. echo "Instance template name: ${template_name}" @@ -373,7 +373,7 @@ function do-node-upgrade() { # Do the actual upgrade. # NOTE(zmerlynn): If you are changing this gcloud command, update # test/e2e/cluster_upgrade.go to match this EXACTLY. - local template_name=$(get-template-name-from-version ${SANITIZED_VERSION}) + local template_name=$(get-template-name-from-version ${SANITIZED_VERSION} ${NODE_INSTANCE_PREFIX}) local old_templates=() local updates=() for group in ${INSTANCE_GROUPS[@]}; do diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 99ea09119f..d253a1183f 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -112,7 +112,10 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then fi fi +# These prefixes must not be prefixes of each other, so that they can be used to +# detect mutually exclusive sets of nodes. 
NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-minion"} +WINDOWS_NODE_INSTANCE_PREFIX=${WINDOWS_NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-windows-node"} NODE_TAGS="${NODE_TAG}" @@ -373,9 +376,12 @@ function upload-tars() { # # Assumed vars: # NODE_INSTANCE_PREFIX +# WINDOWS_NODE_INSTANCE_PREFIX # Vars set: # NODE_NAMES # INSTANCE_GROUPS +# WINDOWS_NODE_NAMES +# WINDOWS_INSTANCE_GROUPS function detect-node-names() { detect-project INSTANCE_GROUPS=() @@ -383,6 +389,12 @@ function detect-node-names() { --project "${PROJECT}" \ --filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \ --format='value(name)' || true)) + WINDOWS_INSTANCE_GROUPS=() + WINDOWS_INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \ + --project "${PROJECT}" \ + --filter "name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \ + --format='value(name)' || true)) + NODE_NAMES=() if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then for group in "${INSTANCE_GROUPS[@]}"; do @@ -395,6 +407,14 @@ if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster") fi + WINDOWS_NODE_NAMES=() + if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then + for group in "${WINDOWS_INSTANCE_GROUPS[@]}"; do + WINDOWS_NODE_NAMES+=($(gcloud compute instance-groups managed \ + list-instances "${group}" --zone "${ZONE}" --project "${PROJECT}" \ + --format='value(instance)')) + done + fi echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2 echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2 @@ -1403,6 +1423,7 @@ function build-windows-kube-env { build-linux-kube-env false $file cat >>$file </dev/null; then gcloud compute instance-groups managed delete \ --project "${PROJECT}" \ @@ -3087,7 +3113,7 @@ function kube-down() { local -a minions minions=( $(gcloud compute instances list \ --project "${PROJECT}" \ - --filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \ + --filter="(name ~ '${NODE_INSTANCE_PREFIX}-.+' OR name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+') AND zone:(${ZONE})" \ --format='value(name)') ) # If any minions are running, delete them in batches. while (( "${#minions[@]}" > 0 )); do @@ -3242,15 +3268,19 @@ function set-replica-name() { REPLICA_NAME="${MASTER_NAME}-${suffix}" } -# Gets the instance template for given NODE_INSTANCE_PREFIX. It echos the template name so that the function -output can be used. +# Gets the instance templates in use by the cluster. It echoes the template names +# so that the function output can be used. # Assumed vars: # NODE_INSTANCE_PREFIX +# WINDOWS_NODE_INSTANCE_PREFIX # # $1: project function get-template() { + local linux_filter="${NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?" + local windows_filter="${WINDOWS_NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?" 
+ gcloud compute instance-templates list \ - --filter="name ~ '${NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?'" \ + --filter="name ~ '${linux_filter}' OR name ~ '${windows_filter}'" \ --project="${1}" --format='value(name)' } @@ -3259,6 +3289,7 @@ function get-template() { # Assumed vars: # MASTER_NAME # NODE_INSTANCE_PREFIX +# WINDOWS_NODE_INSTANCE_PREFIX # ZONE # REGION # Vars set: @@ -3274,11 +3305,19 @@ function check-resources() { KUBE_RESOURCE_FOUND="Managed instance groups ${INSTANCE_GROUPS[@]}" return 1 fi + if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then + KUBE_RESOURCE_FOUND="Managed instance groups ${WINDOWS_INSTANCE_GROUPS[@]}" + return 1 + fi if gcloud compute instance-templates describe --project "${PROJECT}" "${NODE_INSTANCE_PREFIX}-template" &>/dev/null; then KUBE_RESOURCE_FOUND="Instance template ${NODE_INSTANCE_PREFIX}-template" return 1 fi + if gcloud compute instance-templates describe --project "${PROJECT}" "${WINDOWS_NODE_INSTANCE_PREFIX}-template" &>/dev/null; then + KUBE_RESOURCE_FOUND="Instance template ${WINDOWS_NODE_INSTANCE_PREFIX}-template" + return 1 + fi if gcloud compute instances describe --project "${PROJECT}" "${MASTER_NAME}" --zone "${ZONE}" &>/dev/null; then KUBE_RESOURCE_FOUND="Kubernetes master ${MASTER_NAME}" @@ -3294,10 +3333,10 @@ function check-resources() { local -a minions minions=( $(gcloud compute instances list \ --project "${PROJECT}" \ - --filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \ + --filter="(name ~ '${NODE_INSTANCE_PREFIX}-.+' OR name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+') AND zone:(${ZONE})" \ --format='value(name)') ) if (( "${#minions[@]}" > 0 )); then - KUBE_RESOURCE_FOUND="${#minions[@]} matching matching ${NODE_INSTANCE_PREFIX}-.+" + KUBE_RESOURCE_FOUND="${#minions[@]} matching ${NODE_INSTANCE_PREFIX}-.+ or ${WINDOWS_NODE_INSTANCE_PREFIX}-.+" return 1 fi diff --git a/cluster/gce/windows/k8s-node-setup.psm1 b/cluster/gce/windows/k8s-node-setup.psm1 index bd6cb961ad..d8c7307a64 100644 --- a/cluster/gce/windows/k8s-node-setup.psm1 +++ b/cluster/gce/windows/k8s-node-setup.psm1 @@ -250,11 +250,12 @@ function Disable-WindowsDefender { # Creates directories where other functions in this module will read and write # data. +# Note: C:\tmp is required for running certain kubernetes tests. function Create-Directories { Log-Output "Creating ${env:K8S_DIR} and its subdirectories." 
ForEach ($dir in ("${env:K8S_DIR}", "${env:NODE_DIR}", "${env:LOGS_DIR}", "${env:CNI_DIR}", "${env:CNI_CONFIG_DIR}", "${env:MANIFESTS_DIR}", - "${env:PKI_DIR}")) { + "${env:PKI_DIR}"), "C:\tmp") { mkdir -Force $dir } } diff --git a/cluster/log-dump/log-dump.sh b/cluster/log-dump/log-dump.sh index 4dc16c6ae2..d9defea525 100755 --- a/cluster/log-dump/log-dump.sh +++ b/cluster/log-dump/log-dump.sh @@ -52,6 +52,10 @@ readonly initd_logfiles="docker/log" readonly supervisord_logfiles="kubelet.log supervisor/supervisord.log supervisor/kubelet-stdout.log supervisor/kubelet-stderr.log supervisor/docker-stdout.log supervisor/docker-stderr.log" readonly systemd_services="kubelet kubelet-monitor kube-container-runtime-monitor ${LOG_DUMP_SYSTEMD_SERVICES:-docker}" readonly dump_systemd_journal="${LOG_DUMP_SYSTEMD_JOURNAL:-false}" +# Log files found in WINDOWS_LOGS_DIR on Windows nodes: +readonly windows_node_logfiles="kubelet.log kube-proxy.log docker.log" +# Log files found in other directories on Windows nodes: +readonly windows_node_otherfiles="C:\\Windows\\MEMORY.dmp" # Limit the number of concurrent node connections so that we don't run out of # file descriptors for large clusters. @@ -195,6 +199,66 @@ function save-logs() { copy-logs-from-node "${node_name}" "${dir}" "${files}" } +# Saves a copy of the Windows Docker event log to ${WINDOWS_LOGS_DIR}\docker.log +# on node $1. +function export-windows-docker-event-log() { + local -r node="${1}" + + local -r powershell_cmd="powershell.exe -Command \$log=\$(Get-EventLog -LogName Application -Source Docker); Set-Content '${WINDOWS_LOGS_DIR}\\docker.log' \$log.Message" + + # Retry up to 3 times to allow ssh keys to be properly propagated and + # stored. + for retry in {1..3}; do + if gcloud compute ssh --project "${PROJECT}" --zone "${ZONE}" "${node}" \ + --command "$powershell_cmd"; then + break + else + sleep 10 + fi + done +} + +# Save log files and serial console output from Windows node $1 into local +# directory $2. +# This function shouldn't ever trigger errexit. +function save-logs-windows() { + local -r node="${1}" + local -r dest_dir="${2}" + + if [[ ! "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then + echo "Not saving logs for ${node}, Windows log dumping requires gcloud support" + return + fi + + export-windows-docker-event-log "${node}" + + local remote_files=() + for file in ${windows_node_logfiles[@]}; do + remote_files+=( "${WINDOWS_LOGS_DIR}\\${file}" ) + done + remote_files+=( "${windows_node_otherfiles[@]}" ) + + # TODO(pjh, yujuhong): handle rotated logs and copying multiple files at the + # same time. + for remote_file in ${remote_files[@]}; do + # Retry up to 3 times to allow ssh keys to be properly propagated and + # stored. + for retry in {1..3}; do + if gcloud compute scp --recurse --project "${PROJECT}" \ + --zone "${ZONE}" "${node}:${remote_file}" "${dest_dir}" \ + > /dev/null; then + break + else + sleep 10 + fi + done + done + + # Serial port 1 contains the Windows console output. + gcloud compute instances get-serial-port-output --project "${PROJECT}" \ + --zone "${ZONE}" --port 1 "${node}" > "${dest_dir}/serial-1.log" || true +} + # Execute a command in container $2 on node $1. # Uses docker because the container may not ordinarily permit direct execution. function run-in-docker-container() { @@ -247,8 +311,13 @@ function dump_masters() { fi } +# Dumps logs from nodes in the cluster. Linux nodes to dump logs from can be +# specified via $1 or $use_custom_instance_list. 
If not specified then the nodes +# to dump logs for will be detected using detect-node-names(); if Windows nodes +# are present then they will be detected and their logs will be dumped too. function dump_nodes() { local node_names=() + local windows_node_names=() if [[ -n "${1:-}" ]]; then echo "Dumping logs for nodes provided as args to dump_nodes() function" node_names=( "$@" ) @@ -264,9 +333,12 @@ function dump_nodes() { if [[ -n "${NODE_NAMES:-}" ]]; then node_names=( "${NODE_NAMES[@]}" ) fi + if [[ -n "${WINDOWS_NODE_NAMES:-}" ]]; then + windows_node_names=( "${WINDOWS_NODE_NAMES[@]}" ) + fi fi - if [[ "${#node_names[@]}" == 0 ]]; then + if [[ "${#node_names[@]}" == 0 && "${#windows_node_names[@]}" == 0 ]]; then echo "No nodes found!" return fi @@ -276,24 +348,31 @@ function dump_nodes() { node_logfiles_all="${node_logfiles_all} ${hollow_node_logfiles}" fi - nodes_selected_for_logs=() + linux_nodes_selected_for_logs=() if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then # We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs. for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}` do - nodes_selected_for_logs+=("${node_names[$index]}") + linux_nodes_selected_for_logs+=("${node_names[$index]}") done else - nodes_selected_for_logs=( "${node_names[@]}" ) + linux_nodes_selected_for_logs=( "${node_names[@]}" ) fi + all_selected_nodes=( "${linux_nodes_selected_for_logs[@]}" ) + all_selected_nodes+=( "${windows_node_names[@]}" ) proc=${max_dump_processes} - for node_name in "${nodes_selected_for_logs[@]}"; do + for i in "${!all_selected_nodes[@]}"; do + node_name="${all_selected_nodes[$i]}" node_dir="${report_dir}/${node_name}" mkdir -p "${node_dir}" - # Save logs in the background. This speeds up things when there are - # many nodes. - save-logs "${node_name}" "${node_dir}" "${node_logfiles_all}" "${node_systemd_services}" & + if [[ "${i}" -lt "${#linux_nodes_selected_for_logs[@]}" ]]; then + # Save logs in the background. This speeds up things when there are + # many nodes. + save-logs "${node_name}" "${node_dir}" "${node_logfiles_all}" "${node_systemd_services}" & + else + save-logs-windows "${node_name}" "${node_dir}" & + fi # We don't want to run more than ${max_dump_processes} at a time, so # wait once we hit that many nodes. This isn't ideal, since one might @@ -311,6 +390,9 @@ function dump_nodes() { } # Collect names of nodes which didn't run logexporter successfully. +# This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter +# does not run on Windows nodes. +# # Note: This step is O(#nodes^2) as we check if each node is present in the list of succeeded nodes. # Making it linear would add code complexity without much benefit (as it just takes ~1s for 5k nodes). # Assumes: @@ -328,6 +410,8 @@ function find_non_logexported_nodes() { done } +# This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter +# does not run on Windows nodes. 
function dump_nodes_with_logexporter() { if [[ -n "${use_custom_instance_list}" ]]; then echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function" @@ -446,10 +530,16 @@ function detect_node_failures() { fi detect-node-names - if [ -z "${INSTANCE_GROUPS:-}" ]; then + if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then + local all_instance_groups=(${INSTANCE_GROUPS[@]} ${WINDOWS_INSTANCE_GROUPS[@]}) + else + local all_instance_groups=(${INSTANCE_GROUPS[@]}) + fi + + if [ -z "${all_instance_groups:-}" ]; then return fi - for group in "${INSTANCE_GROUPS[@]}"; do + for group in "${all_instance_groups[@]}"; do local creation_timestamp=$(gcloud compute instance-groups managed describe \ "${group}" \ --project "${PROJECT}" \ diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index 3f7cd3d29c..c28dcb0df5 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -56,7 +56,7 @@ if [[ "${KUBERNETES_PROVIDER:-}" == "gce" ]]; then # In multizone mode we need to add instances for all nodes in the region. if [[ "${MULTIZONE:-}" == "true" ]]; then EXPECTED_NUM_NODES=$(gcloud -q compute instances list --project="${PROJECT}" --format=[no-heading] \ - --filter="name ~ '${NODE_INSTANCE_PREFIX}.*' AND zone:($(gcloud -q compute zones list --project="${PROJECT}" --filter=region=${REGION} --format=csv[no-heading]\(name\) | tr "\n" "," | sed "s/,$//"))" | wc -l) + --filter="(name ~ '${NODE_INSTANCE_PREFIX}.*' OR name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}.*') AND zone:($(gcloud -q compute zones list --project="${PROJECT}" --filter=region=${REGION} --format=csv[no-heading]\(name\) | tr "\n" "," | sed "s/,$//"))" | wc -l) echo "Computing number of nodes, NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX}, REGION=${REGION}, EXPECTED_NUM_NODES=${EXPECTED_NUM_NODES}" fi else diff --git a/cmd/kubeadm/app/BUILD b/cmd/kubeadm/app/BUILD index 3f3441f41e..7a1263a587 100644 --- a/cmd/kubeadm/app/BUILD +++ b/cmd/kubeadm/app/BUILD @@ -41,6 +41,7 @@ filegroup( "//cmd/kubeadm/app/phases/bootstraptoken/node:all-srcs", "//cmd/kubeadm/app/phases/certs:all-srcs", "//cmd/kubeadm/app/phases/controlplane:all-srcs", + "//cmd/kubeadm/app/phases/copycerts:all-srcs", "//cmd/kubeadm/app/phases/etcd:all-srcs", "//cmd/kubeadm/app/phases/kubeconfig:all-srcs", "//cmd/kubeadm/app/phases/kubelet:all-srcs", @@ -48,7 +49,6 @@ filegroup( "//cmd/kubeadm/app/phases/patchnode:all-srcs", "//cmd/kubeadm/app/phases/selfhosting:all-srcs", "//cmd/kubeadm/app/phases/upgrade:all-srcs", - "//cmd/kubeadm/app/phases/uploadcerts:all-srcs", "//cmd/kubeadm/app/phases/uploadconfig:all-srcs", "//cmd/kubeadm/app/preflight:all-srcs", "//cmd/kubeadm/app/util:all-srcs", diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go index 56d9c280fb..67bdfe7e7c 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go @@ -147,7 +147,7 @@ type APIEndpoint struct { // NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join" type NodeRegistrationOptions struct { - // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm joiń` operation. + // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. // This field is also used in the CommonName field of the kubelet's client certificate to the API server. 
// Defaults to the hostname of the node if not provided. Name string `json:"name,omitempty"` diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index af44803a5e..f8331e42bb 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -428,6 +428,7 @@ func isAllowedFlag(flagName string) bool { kubeadmcmdoptions.NodeCRISocket, kubeadmcmdoptions.KubeconfigDir, kubeadmcmdoptions.UploadCerts, + kubeadmcmdoptions.CertificateKey, "print-join-command", "rootfs", "v") if knownFlags.Has(flagName) { return true diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index e22ef05bbd..8cd6a13108 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -84,6 +84,7 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", "//cmd/kubeadm/app/cmd/options:go_default_library", diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index c4533bb8a4..8376245e74 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -127,6 +127,7 @@ type joinOptions struct { controlPlane bool ignorePreflightErrors []string externalcfg *kubeadmapiv1beta1.JoinConfiguration + certificateKey string } // compile-time assert that the local data object satisfies the phases data interface. @@ -142,6 +143,7 @@ type joinData struct { clientSets map[string]*clientset.Clientset ignorePreflightErrors sets.String outputWriter io.Writer + certificateKey string } // NewCmdJoin returns "kubeadm join" command. @@ -192,7 +194,7 @@ func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { } addJoinConfigFlags(cmd.Flags(), joinOptions.externalcfg) - addJoinOtherFlags(cmd.Flags(), &joinOptions.cfgPath, &joinOptions.ignorePreflightErrors, &joinOptions.controlPlane, &joinOptions.token) + addJoinOtherFlags(cmd.Flags(), &joinOptions.cfgPath, &joinOptions.ignorePreflightErrors, &joinOptions.controlPlane, &joinOptions.token, &joinOptions.certificateKey) joinRunner.AppendPhase(phases.NewPreflightPhase()) joinRunner.AppendPhase(phases.NewControlPlanePreparePhase()) @@ -254,7 +256,14 @@ func addJoinConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1beta1.JoinConfig } // addJoinOtherFlags adds join flags that are not bound to a configuration file to the given flagset -func addJoinOtherFlags(flagSet *flag.FlagSet, cfgPath *string, ignorePreflightErrors *[]string, controlPlane *bool, token *string) { +func addJoinOtherFlags( + flagSet *flag.FlagSet, + cfgPath *string, + ignorePreflightErrors *[]string, + controlPlane *bool, + token *string, + certificateKey *string, +) { flagSet.StringVar( cfgPath, options.CfgPath, *cfgPath, "Path to kubeadm config file.", @@ -271,6 +280,10 @@ func addJoinOtherFlags(flagSet *flag.FlagSet, cfgPath *string, ignorePreflightEr controlPlane, options.ControlPlane, *controlPlane, "Create a new control plane instance on this node", ) + flagSet.StringVar( + certificateKey, options.CertificateKey, "", + "Use this key to decrypt the certificate secrets uploaded by init.", + ) } // newJoinOptions returns a struct ready for being used for creating cmd join flags. 
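For context on the flag wiring above: the value passed to `--certificate-key` is a hex-encoded symmetric key. `kubeadm init phase upload-certs` generates it via `copycerts.CreateCertificateKey` (see the phase changes below), and join decodes it with `hex.DecodeString` before decrypting the kubeadm-certs Secret. A minimal sketch of that round trip, assuming a random 32-byte key; the key size and the helper name here are illustrative, not taken from this diff:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
)

// createCertificateKey sketches what `kubeadm init phase upload-certs`
// hands back: random key bytes, hex-encoded so they fit on the command line.
// The 32-byte size is an assumption for illustration.
func createCertificateKey() (string, error) {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return "", err
	}
	return hex.EncodeToString(key), nil
}

func main() {
	key, err := createCertificateKey()
	if err != nil {
		log.Fatal(err)
	}
	// On join, the flag value is decoded back to raw bytes before the
	// kubeadm-certs Secret data can be decrypted.
	raw, err := hex.DecodeString(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("kubeadm join ... --certificate-key %s (%d raw bytes)\n", key, len(raw))
}
```
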
@@ -375,9 +388,15 @@ func newJoinData(cmd *cobra.Command, args []string, options *joinOptions, out io clientSets: map[string]*clientset.Clientset{}, ignorePreflightErrors: ignorePreflightErrorsSet, outputWriter: out, + certificateKey: options.certificateKey, }, nil } +// CertificateKey returns the key used to encrypt the certs. +func (j *joinData) CertificateKey() string { + return j.certificateKey +} + // Cfg returns the JoinConfiguration. func (j *joinData) Cfg() *kubeadmapi.JoinConfiguration { return j.cfg diff --git a/cmd/kubeadm/app/cmd/options/constant.go b/cmd/kubeadm/app/cmd/options/constant.go index 2386a20edd..f89b6dfab0 100644 --- a/cmd/kubeadm/app/cmd/options/constant.go +++ b/cmd/kubeadm/app/cmd/options/constant.go @@ -121,4 +121,7 @@ const ( // UploadCerts flag instruct kubeadm to upload certificates UploadCerts = "experimental-upload-certs" + + // CertificateKey flag sets the key used to encrypt and decrypt certificate secrets + CertificateKey = "certificate-key" ) diff --git a/cmd/kubeadm/app/cmd/phases/init/BUILD b/cmd/kubeadm/app/cmd/phases/init/BUILD index 22033bbf72..eb9eaf416a 100644 --- a/cmd/kubeadm/app/cmd/phases/init/BUILD +++ b/cmd/kubeadm/app/cmd/phases/init/BUILD @@ -33,12 +33,12 @@ go_library( "//cmd/kubeadm/app/phases/bootstraptoken/node:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", "//cmd/kubeadm/app/phases/controlplane:go_default_library", + "//cmd/kubeadm/app/phases/copycerts:go_default_library", "//cmd/kubeadm/app/phases/etcd:go_default_library", "//cmd/kubeadm/app/phases/kubeconfig:go_default_library", "//cmd/kubeadm/app/phases/kubelet:go_default_library", "//cmd/kubeadm/app/phases/markcontrolplane:go_default_library", "//cmd/kubeadm/app/phases/patchnode:go_default_library", - "//cmd/kubeadm/app/phases/uploadcerts:go_default_library", "//cmd/kubeadm/app/phases/uploadconfig:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", "//cmd/kubeadm/app/util:go_default_library", diff --git a/cmd/kubeadm/app/cmd/phases/init/uploadcerts.go b/cmd/kubeadm/app/cmd/phases/init/uploadcerts.go index 447f56bf2f..3c0fb5c4b0 100644 --- a/cmd/kubeadm/app/cmd/phases/init/uploadcerts.go +++ b/cmd/kubeadm/app/cmd/phases/init/uploadcerts.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadcerts" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts" ) // NewUploadCertsPhase returns the uploadCerts phase @@ -59,14 +59,14 @@ func runUploadCerts(c workflow.RunData) error { } if len(data.CertificateKey()) == 0 { - certificateKey, err := uploadcerts.CreateCertificateKey() + certificateKey, err := copycerts.CreateCertificateKey() if err != nil { return err } data.SetCertificateKey(certificateKey) } - if err := uploadcerts.UploadCerts(client, data.Cfg(), data.CertificateKey()); err != nil { + if err := copycerts.UploadCerts(client, data.Cfg(), data.CertificateKey()); err != nil { return errors.Wrap(err, "error uploading certs") } return nil diff --git a/cmd/kubeadm/app/cmd/phases/join/BUILD b/cmd/kubeadm/app/cmd/phases/join/BUILD index 6eb6d73fdb..f9eed0bb11 100644 --- a/cmd/kubeadm/app/cmd/phases/join/BUILD +++ b/cmd/kubeadm/app/cmd/phases/join/BUILD @@ -20,6 +20,7 @@ go_library( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", "//cmd/kubeadm/app/phases/controlplane:go_default_library", + 
"//cmd/kubeadm/app/phases/copycerts:go_default_library", "//cmd/kubeadm/app/phases/etcd:go_default_library", "//cmd/kubeadm/app/phases/kubeconfig:go_default_library", "//cmd/kubeadm/app/phases/kubelet:go_default_library", diff --git a/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go b/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go index c006df16aa..858115b5b4 100644 --- a/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go +++ b/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go @@ -21,13 +21,17 @@ import ( "github.com/pkg/errors" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts" kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig" + kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" ) // NewControlPlanePreparePhase creates a kubeadm workflow phase that implements the preparation of the node to serve a control plane @@ -35,7 +39,6 @@ func NewControlPlanePreparePhase() workflow.Phase { return workflow.Phase{ Name: "control-plane-prepare", Short: "Prepares the machine for serving a control plane.", - Long: cmdutil.MacroCommandLongDescription, Phases: []workflow.Phase{ { Name: "all", @@ -43,6 +46,7 @@ func NewControlPlanePreparePhase() workflow.Phase { InheritFlags: getControlPlanePreparePhaseFlags(), RunAllSiblings: true, }, + newControlPlanePrepareDownloadCertsSubphase(), newControlPlanePrepareCertsSubphase(), newControlPlanePrepareKubeconfigSubphase(), newControlPlanePrepareManifestsSubphases(), @@ -60,6 +64,20 @@ func getControlPlanePreparePhaseFlags() []string { options.TokenDiscovery, options.TokenDiscoveryCAHash, options.TokenDiscoverySkipCAHash, + options.CertificateKey, + } +} + +func newControlPlanePrepareDownloadCertsSubphase() workflow.Phase { + return workflow.Phase{ + Name: "download-certs", + Short: fmt.Sprintf("Download certificates from %s", kubeadmconstants.KubeadmCertsSecret), + Long: cmdutil.MacroCommandLongDescription, + Run: runControlPlanePrepareDownloadCertsPhaseLocal, + InheritFlags: []string{ + options.CfgPath, + options.CertificateKey, + }, } } @@ -110,6 +128,33 @@ func runControlPlanePrepareManifestsSubphase(c workflow.RunData) error { return controlplane.CreateInitStaticPodManifestFiles(kubeadmconstants.GetStaticPodDirectory(), cfg) } +func runControlPlanePrepareDownloadCertsPhaseLocal(c workflow.RunData) error { + data, ok := c.(JoinData) + if !ok { + return errors.New("download-certs phase invoked with an invalid data struct") + } + + if data.Cfg().ControlPlane == nil || len(data.CertificateKey()) == 0 { + klog.V(1).Infoln("[download-certs] Skipping certs download") + return nil + } + + cfg, err := data.InitCfg() + if err != nil { + return err + } + + client, err := bootstrapClient(data) + if err != nil { + return err + } + + if err := copycerts.DownloadCerts(client, cfg, data.CertificateKey()); err != nil { + return errors.Wrap(err, "error downloading certs") + } + return nil +} + func runControlPlanePrepareCertsPhaseLocal(c workflow.RunData) error { data, ok := c.(JoinData) if !ok { @@ -157,3 +202,15 @@ func runControlPlanePrepareKubeconfigPhaseLocal(c workflow.RunData) error { return nil } + +func 
bootstrapClient(data JoinData) (clientset.Interface, error) { + tlsBootstrapCfg, err := data.TLSBootstrapCfg() + if err != nil { + return nil, errors.Wrap(err, "unable to access the cluster") + } + client, err := kubeconfigutil.ToClientSet(tlsBootstrapCfg) + if err != nil { + return nil, errors.Wrap(err, "unable to access the cluster") + } + return client, nil +} diff --git a/cmd/kubeadm/app/cmd/phases/join/data.go b/cmd/kubeadm/app/cmd/phases/join/data.go index 1955419a9f..617dea2b00 100644 --- a/cmd/kubeadm/app/cmd/phases/join/data.go +++ b/cmd/kubeadm/app/cmd/phases/join/data.go @@ -28,6 +28,7 @@ import ( // JoinData is the interface to use for join phases. // The "joinData" type from "cmd/join.go" must satisfy this interface. type JoinData interface { + CertificateKey() string Cfg() *kubeadmapi.JoinConfiguration KubeConfigPath() string TLSBootstrapCfg() (*clientcmdapi.Config, error) diff --git a/cmd/kubeadm/app/cmd/phases/join/data_test.go b/cmd/kubeadm/app/cmd/phases/join/data_test.go index 8de201b393..ee10d91186 100644 --- a/cmd/kubeadm/app/cmd/phases/join/data_test.go +++ b/cmd/kubeadm/app/cmd/phases/join/data_test.go @@ -31,6 +31,7 @@ type testJoinData struct{} // testJoinData must satisfy JoinData. var _ JoinData = &testJoinData{} +func (j *testJoinData) CertificateKey() string { return "" } func (j *testJoinData) Cfg() *kubeadmapi.JoinConfiguration { return nil } func (j *testJoinData) KubeConfigPath() string { return "" } func (j *testJoinData) TLSBootstrapCfg() (*clientcmdapi.Config, error) { return nil, nil } diff --git a/cmd/kubeadm/app/cmd/phases/join/preflight.go b/cmd/kubeadm/app/cmd/phases/join/preflight.go index 6a4137f097..a0a43fb387 100644 --- a/cmd/kubeadm/app/cmd/phases/join/preflight.go +++ b/cmd/kubeadm/app/cmd/phases/join/preflight.go @@ -39,7 +39,7 @@ var ( kubeadm join phase preflight --config kubeadm-config.yml `) - notReadyToJoinControPlaneTemp = template.Must(template.New("join").Parse(dedent.Dedent(` + notReadyToJoinControlPlaneTemp = template.Must(template.New("join").Parse(dedent.Dedent(` One or more conditions for hosting a new control plane instance is not satisfied. 
{{.Error}} @@ -105,14 +105,15 @@ func runPreflight(c workflow.RunData) error { if j.Cfg().ControlPlane != nil { // Checks if the cluster configuration supports // joining a new control plane instance and if all the necessary certificates are provided - if err := checkIfReadyForAdditionalControlPlane(&initCfg.ClusterConfiguration); err != nil { + hasCertificateKey := len(j.CertificateKey()) > 0 + if err := checkIfReadyForAdditionalControlPlane(&initCfg.ClusterConfiguration, hasCertificateKey); err != nil { // outputs the not ready for hosting a new control plane instance message ctx := map[string]string{ "Error": err.Error(), } var msg bytes.Buffer - notReadyToJoinControPlaneTemp.Execute(&msg, ctx) + notReadyToJoinControlPlaneTemp.Execute(&msg, ctx) return errors.New(msg.String()) } @@ -134,15 +135,17 @@ func runPreflight(c workflow.RunData) error { // checkIfReadyForAdditionalControlPlane ensures that the cluster is in a state that supports // joining an additional control plane instance and if the node is ready to preflight -func checkIfReadyForAdditionalControlPlane(initConfiguration *kubeadmapi.ClusterConfiguration) error { +func checkIfReadyForAdditionalControlPlane(initConfiguration *kubeadmapi.ClusterConfiguration, hasCertificateKey bool) error { // blocks if the cluster was created without a stable control plane endpoint if initConfiguration.ControlPlaneEndpoint == "" { return errors.New("unable to add a new control plane instance to a cluster that doesn't have a stable controlPlaneEndpoint address") } - // checks if the certificates that must be equal across contolplane instances are provided - if ret, err := certs.SharedCertificateExists(initConfiguration); !ret { - return err + if !hasCertificateKey { + // checks if the certificates that must be equal across controlplane instances are provided + if ret, err := certs.SharedCertificateExists(initConfiguration); !ret { + return err + } } return nil diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index b8e2b919eb..cac15ea31e 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" @@ -68,15 +69,20 @@ func NewCmdReset(in io.Reader, out io.Writer) *cobra.Command { kubeadmutil.CheckErr(err) } + cfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false) + if err != nil { + klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err) + } + if criSocketPath == "" { - criSocketPath, err = resetDetectCRISocket(client) + criSocketPath, err = resetDetectCRISocket(cfg) kubeadmutil.CheckErr(err) klog.V(1).Infof("[reset] detected and using CRI socket: %s", criSocketPath) } r, err := NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath) kubeadmutil.CheckErr(err) - kubeadmutil.CheckErr(r.Run(out, client)) + kubeadmutil.CheckErr(r.Run(out, client, cfg)) }, } @@ -131,17 +137,19 @@ func NewReset(in io.Reader, ignorePreflightErrors sets.String, forceReset bool, } // Run reverts any changes made to this host by "kubeadm init" or "kubeadm join". 
-func (r *Reset) Run(out io.Writer, client clientset.Interface) error { +func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.InitConfiguration) error { var dirsToClean []string // Only clear etcd data when using local etcd. etcdManifestPath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName, "etcd.yaml") klog.V(1).Infof("[reset] checking for etcd config") - etcdDataDir, err := getEtcdDataDir(etcdManifestPath, client) + etcdDataDir, err := getEtcdDataDir(etcdManifestPath, cfg) if err == nil { dirsToClean = append(dirsToClean, etcdDataDir) - if err := removeEtcdMember(client); err != nil { - klog.Warningf("[reset] failed to remove etcd member: %v\n.Please manually remove this etcd member using etcdctl", err) + if cfg != nil { + if err := etcdphase.RemoveStackedEtcdMemberFromCluster(client, cfg); err != nil { + klog.Warningf("[reset] failed to remove etcd member: %v\n.Please manually remove this etcd member using etcdctl", err) + } } } else { fmt.Println("[reset] no etcd config found. Assuming external etcd") @@ -209,25 +217,14 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface) error { return nil } -func removeEtcdMember(client clientset.Interface) error { - cfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false) - if err != nil { - return err - } - return etcdphase.RemoveStackedEtcdMemberFromCluster(client, cfg) -} - -func getEtcdDataDir(manifestPath string, client clientset.Interface) (string, error) { +func getEtcdDataDir(manifestPath string, cfg *kubeadmapi.InitConfiguration) (string, error) { const etcdVolumeName = "etcd-data" var dataDir string - if client != nil { - cfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false) - if err == nil && cfg.Etcd.Local != nil { - return cfg.Etcd.Local.DataDir, nil - } - klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap, using etcd pod spec as fallback: %v", err) + if cfg != nil && cfg.Etcd.Local != nil { + return cfg.Etcd.Local.DataDir, nil } + klog.Warningln("[reset] No kubeadm config, using etcd pod spec to get data directory") etcdPod, err := utilstaticpod.ReadStaticPodFromDisk(manifestPath) if err != nil { @@ -311,13 +308,10 @@ func resetConfigDir(configPathDir, pkiPathDir string) { } } -func resetDetectCRISocket(client clientset.Interface) (string, error) { - if client != nil { - // first try to connect to the cluster for the CRI socket - cfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false) - if err == nil { - return cfg.NodeRegistration.CRISocket, nil - } +func resetDetectCRISocket(cfg *kubeadmapi.InitConfiguration) (string, error) { + if cfg != nil { + // first try to get the CRI socket from the cluster configuration + return cfg.NodeRegistration.CRISocket, nil } // if this fails, try to detect it diff --git a/cmd/kubeadm/app/cmd/reset_test.go b/cmd/kubeadm/app/cmd/reset_test.go index 6908b20e94..7c2e560dc8 100644 --- a/cmd/kubeadm/app/cmd/reset_test.go +++ b/cmd/kubeadm/app/cmd/reset_test.go @@ -25,7 +25,7 @@ import ( "github.com/lithammer/dedent" - clientsetfake "k8s.io/client-go/kubernetes/fake" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -275,38 +275,38 @@ func TestGetEtcdDataDir(t *testing.T) { podYaml string expectErr 
bool writeManifest bool - validClient bool + validConfig bool }{ "non-existent file returns error": { expectErr: true, writeManifest: false, - validClient: true, + validConfig: true, }, "return etcd data dir": { dataDir: "/path/to/etcd", podYaml: etcdPod, expectErr: false, writeManifest: true, - validClient: true, + validConfig: true, }, "invalid etcd pod": { podYaml: etcdPodInvalid, expectErr: true, writeManifest: true, - validClient: true, + validConfig: true, }, "etcd pod spec without data volume": { podYaml: etcdPodWithoutDataVolume, expectErr: true, writeManifest: true, - validClient: true, + validConfig: true, }, "kubeconfig file doesn't exist": { dataDir: "/path/to/etcd", podYaml: etcdPod, expectErr: false, writeManifest: true, - validClient: false, + validConfig: false, }, } @@ -325,9 +325,9 @@ func TestGetEtcdDataDir(t *testing.T) { var dataDir string var err error - if test.validClient { - client := clientsetfake.NewSimpleClientset() - dataDir, err = getEtcdDataDir(manifestPath, client) + if test.validConfig { + cfg := &kubeadmapi.InitConfiguration{} + dataDir, err = getEtcdDataDir(manifestPath, cfg) } else { dataDir, err = getEtcdDataDir(manifestPath, nil) } diff --git a/cmd/kubeadm/app/cmd/upgrade/diff_test.go b/cmd/kubeadm/app/cmd/upgrade/diff_test.go index a4de23b104..9c10928f33 100644 --- a/cmd/kubeadm/app/cmd/upgrade/diff_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/diff_test.go @@ -22,7 +22,7 @@ import ( ) const ( - testUpgradeDiffConfig = `testdata/diff_master_config.yaml` + testUpgradeDiffConfig = `testdata/diff_controlplane_config.yaml` testUpgradeDiffManifest = `testdata/diff_dummy_manifest.yaml` ) diff --git a/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml b/cmd/kubeadm/app/cmd/upgrade/testdata/diff_controlplane_config.yaml similarity index 100% rename from cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml rename to cmd/kubeadm/app/cmd/upgrade/testdata/diff_controlplane_config.yaml diff --git a/cmd/kubeadm/app/cmd/util/join.go b/cmd/kubeadm/app/cmd/util/join.go index c05bc7e1ac..1229ab64f0 100644 --- a/cmd/kubeadm/app/cmd/util/join.go +++ b/cmd/kubeadm/app/cmd/util/join.go @@ -30,7 +30,7 @@ import ( ) var joinCommandTemplate = template.Must(template.New("join").Parse(`` + - `kubeadm join {{.MasterHostPort}} --token {{.Token}}{{range $h := .CAPubKeyPins}} --discovery-token-ca-cert-hash {{$h}}{{end}}{{if .UploadCerts}} --certificate-key {{.CertificateKey}}{{end}}`, + `kubeadm join {{.ControlPlaneHostPort}} --token {{.Token}}{{range $h := .CAPubKeyPins}} --discovery-token-ca-cert-hash {{$h}}{{end}}{{if .UploadCerts}} --certificate-key {{.CertificateKey}}{{end}}`, )) // GetJoinCommand returns the kubeadm join command for a given token and @@ -71,11 +71,11 @@ func GetJoinCommand(kubeConfigFile, token, key string, skipTokenPrint, uploadCer } ctx := map[string]interface{}{ - "Token": token, - "CAPubKeyPins": publicKeyPins, - "MasterHostPort": strings.Replace(clusterConfig.Server, "https://", "", -1), - "UploadCerts": uploadCerts, - "CertificateKey": key, + "Token": token, + "CAPubKeyPins": publicKeyPins, + "ControlPlaneHostPort": strings.Replace(clusterConfig.Server, "https://", "", -1), + "UploadCerts": uploadCerts, + "CertificateKey": key, } if skipTokenPrint { diff --git a/cmd/kubeadm/app/discovery/token/token.go b/cmd/kubeadm/app/discovery/token/token.go index 3f5b0c0116..2494d8c166 100644 --- a/cmd/kubeadm/app/discovery/token/token.go +++ b/cmd/kubeadm/app/discovery/token/token.go @@ -172,16 +172,16 @@ func RetrieveValidatedConfigInfo(cfg 
*kubeadmapi.JoinConfiguration) (*clientcmda // buildInsecureBootstrapKubeConfig makes a kubeconfig object that connects insecurely to the API Server for bootstrapping purposes func buildInsecureBootstrapKubeConfig(endpoint, clustername string) *clientcmdapi.Config { - masterEndpoint := fmt.Sprintf("https://%s", endpoint) - bootstrapConfig := kubeconfigutil.CreateBasic(masterEndpoint, clustername, BootstrapUser, []byte{}) + controlPlaneEndpoint := fmt.Sprintf("https://%s", endpoint) + bootstrapConfig := kubeconfigutil.CreateBasic(controlPlaneEndpoint, clustername, BootstrapUser, []byte{}) bootstrapConfig.Clusters[clustername].InsecureSkipTLSVerify = true return bootstrapConfig } // buildSecureBootstrapKubeConfig makes a kubeconfig object that connects securely to the API Server for bootstrapping purposes (validating with the specified CA) func buildSecureBootstrapKubeConfig(endpoint string, caCert []byte, clustername string) *clientcmdapi.Config { - masterEndpoint := fmt.Sprintf("https://%s", endpoint) - bootstrapConfig := kubeconfigutil.CreateBasic(masterEndpoint, clustername, BootstrapUser, caCert) + controlPlaneEndpoint := fmt.Sprintf("https://%s", endpoint) + bootstrapConfig := kubeconfigutil.CreateBasic(controlPlaneEndpoint, clustername, BootstrapUser, caCert) return bootstrapConfig } diff --git a/cmd/kubeadm/app/phases/uploadcerts/BUILD b/cmd/kubeadm/app/phases/copycerts/BUILD similarity index 82% rename from cmd/kubeadm/app/phases/uploadcerts/BUILD rename to cmd/kubeadm/app/phases/copycerts/BUILD index 89d6cbc038..09699b6114 100644 --- a/cmd/kubeadm/app/phases/uploadcerts/BUILD +++ b/cmd/kubeadm/app/phases/copycerts/BUILD @@ -2,8 +2,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = ["uploadcerts.go"], - importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadcerts", + srcs = ["copycerts.go"], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts", visibility = ["//visibility:public"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", @@ -14,9 +14,12 @@ go_library( "//pkg/apis/rbac/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/util/cert:go_default_library", + "//staging/src/k8s.io/client-go/util/keyutil:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", ], @@ -38,7 +41,7 @@ filegroup( go_test( name = "go_default_test", - srcs = ["uploadcerts_test.go"], + srcs = ["copycerts_test.go"], embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", diff --git a/cmd/kubeadm/app/phases/uploadcerts/uploadcerts.go b/cmd/kubeadm/app/phases/copycerts/copycerts.go similarity index 67% rename from cmd/kubeadm/app/phases/uploadcerts/uploadcerts.go rename to cmd/kubeadm/app/phases/copycerts/copycerts.go index 9e5b1f0e1d..229be0a22f 100644 --- a/cmd/kubeadm/app/phases/uploadcerts/uploadcerts.go +++ b/cmd/kubeadm/app/phases/copycerts/copycerts.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package uploadcerts +package copycerts import ( "encoding/hex" @@ -28,9 +28,12 @@ import ( v1 "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clientset "k8s.io/client-go/kubernetes" + certutil "k8s.io/client-go/util/cert" + keyutil "k8s.io/client-go/util/keyutil" bootstraputil "k8s.io/cluster-bootstrap/token/util" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -92,7 +95,7 @@ func UploadCerts(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, return err } - secretData, err := getSecretData(cfg, decodedKey) + secretData, err := getDataFromDisk(cfg, decodedKey) if err != nil { return err } @@ -169,7 +172,7 @@ func loadAndEncryptCert(certPath string, key []byte) ([]byte, error) { return cryptoutil.EncryptBytes(cert, key) } -func certsToUpload(cfg *kubeadmapi.InitConfiguration) map[string]string { +func certsToTransfer(cfg *kubeadmapi.InitConfiguration) map[string]string { certsDir := cfg.CertificatesDir certs := map[string]string{ kubeadmconstants.CACertName: path.Join(certsDir, kubeadmconstants.CACertName), @@ -191,15 +194,85 @@ func certsToUpload(cfg *kubeadmapi.InitConfiguration) map[string]string { return certs } -func getSecretData(cfg *kubeadmapi.InitConfiguration, key []byte) (map[string][]byte, error) { +func getDataFromDisk(cfg *kubeadmapi.InitConfiguration, key []byte) (map[string][]byte, error) { secretData := map[string][]byte{} - for certName, certPath := range certsToUpload(cfg) { + for certName, certPath := range certsToTransfer(cfg) { cert, err := loadAndEncryptCert(certPath, key) if err == nil || (err != nil && os.IsNotExist(err)) { - secretData[strings.Replace(certName, "/", "-", -1)] = cert + secretData[certOrKeyNameToSecretName(certName)] = cert } else { return nil, err } } return secretData, nil } + +// DownloadCerts downloads the certificates needed to join a new control plane. 
+func DownloadCerts(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, key string) error { + fmt.Printf("[download-certs] downloading the certificates in Secret %q in the %q Namespace\n", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem) + + decodedKey, err := hex.DecodeString(key) + if err != nil { + return errors.Wrap(err, "error decoding certificate key") + } + + secret, err := getSecret(client) + if err != nil { + return errors.Wrap(err, "error downloading the secret") + } + + secretData, err := getDataFromSecret(secret, decodedKey) + if err != nil { + return errors.Wrap(err, "error decoding secret data with provided key") + } + + for certOrKeyName, certOrKeyPath := range certsToTransfer(cfg) { + certOrKeyData, found := secretData[certOrKeyNameToSecretName(certOrKeyName)] + if !found { + return errors.New("couldn't find required certificate or key in Secret") + } + if err := writeCertOrKey(certOrKeyPath, certOrKeyData); err != nil { + return err + } + } + + return nil +} + +func writeCertOrKey(certOrKeyPath string, certOrKeyData []byte) error { + if _, err := keyutil.ParsePublicKeysPEM(certOrKeyData); err == nil { + return keyutil.WriteKey(certOrKeyPath, certOrKeyData) + } else if _, err := certutil.ParseCertsPEM(certOrKeyData); err == nil { + return certutil.WriteCert(certOrKeyPath, certOrKeyData) + } + return errors.New("unknown data found in Secret entry") +} + +func getSecret(client clientset.Interface) (*v1.Secret, error) { + secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, errors.Errorf("Secret %q was not found in the %q Namespace. This Secret might have expired. Please, run `kubeadm init phase upload-certs` on a control plane to generate a new one", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem) + } + return nil, err + } + return secret, nil +} + +func getDataFromSecret(secret *v1.Secret, key []byte) (map[string][]byte, error) { + secretData := map[string][]byte{} + for certName, encryptedCert := range secret.Data { + cert, err := cryptoutil.DecryptBytes(encryptedCert, key) + if err != nil { + // If any of the decrypt operations fail do not return a partial result, + // return an empty result immediately + return map[string][]byte{}, err + } + secretData[certName] = cert + } + return secretData, nil +} + +func certOrKeyNameToSecretName(certOrKeyName string) string { + return strings.Replace(certOrKeyName, "/", "-", -1) +} diff --git a/cmd/kubeadm/app/phases/uploadcerts/uploadcerts_test.go b/cmd/kubeadm/app/phases/copycerts/copycerts_test.go similarity index 66% rename from cmd/kubeadm/app/phases/uploadcerts/uploadcerts_test.go rename to cmd/kubeadm/app/phases/copycerts/copycerts_test.go index a8d2faf250..3cdd762d8a 100644 --- a/cmd/kubeadm/app/phases/uploadcerts/uploadcerts_test.go +++ b/cmd/kubeadm/app/phases/copycerts/copycerts_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package uploadcerts +package copycerts import ( "encoding/hex" @@ -38,7 +38,7 @@ func TestUploadCerts(t *testing.T) { } //test cert name, test cert can be decrypted -func TestGetSecretData(t *testing.T) { +func TestGetDataFromInitConfig(t *testing.T) { certData := []byte("cert-data") tmpdir := testutil.SetupTempDir(t) defer os.RemoveAll(tmpdir) @@ -58,14 +58,14 @@ t.Fatalf(dedent.Dedent("failed to create etcd cert dir.\nfatal error: %v"), err) } - certs := certsToUpload(cfg) + certs := certsToTransfer(cfg) for name, path := range certs { if err := ioutil.WriteFile(path, certData, 0644); err != nil { t.Fatalf(dedent.Dedent("failed to write cert: %s\nfatal error: %v"), name, err) } } - secretData, err := getSecretData(cfg, decodedKey) + secretData, err := getDataFromDisk(cfg, decodedKey) if err != nil { t.Fatalf("failed to get secret data. fatal error: %v", err) } @@ -83,29 +83,44 @@ } } -func TestCertsToUpload(t *testing.T) { +func TestCertsToTransfer(t *testing.T) { localEtcdCfg := &kubeadmapi.InitConfiguration{} externalEtcdCfg := &kubeadmapi.InitConfiguration{} externalEtcdCfg.Etcd = kubeadmapi.Etcd{} externalEtcdCfg.Etcd.External = &kubeadmapi.ExternalEtcd{} + commonExpectedCerts := []string{ + kubeadmconstants.CACertName, + kubeadmconstants.CAKeyName, + kubeadmconstants.FrontProxyCACertName, + kubeadmconstants.FrontProxyCAKeyName, + kubeadmconstants.ServiceAccountPublicKeyName, + kubeadmconstants.ServiceAccountPrivateKeyName, + } + tests := map[string]struct { config *kubeadmapi.InitConfiguration expectedCerts []string }{ "local etcd": { - config: localEtcdCfg, - expectedCerts: []string{kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdCAKeyName}, + config: localEtcdCfg, + expectedCerts: append( + []string{kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdCAKeyName}, + commonExpectedCerts..., + ), }, "external etcd": { - config: externalEtcdCfg, - expectedCerts: []string{externalEtcdCA, externalEtcdCert, externalEtcdKey}, + config: externalEtcdCfg, + expectedCerts: append( + []string{externalEtcdCA, externalEtcdCert, externalEtcdKey}, + commonExpectedCerts..., + ), }, } for name, test := range tests { t.Run(name, func(t2 *testing.T) { - certList := certsToUpload(test.config) + certList := certsToTransfer(test.config) for _, cert := range test.expectedCerts { if _, found := certList[cert]; !found { t2.Fatalf(dedent.Dedent("failed to get list of certs to upload\ncert %s not found"), cert) @@ -114,3 +129,30 @@ }) } } + +func TestCertOrKeyNameToSecretName(t *testing.T) { + tests := []struct { + keyName string + expectedSecretName string + }{ + { + keyName: "apiserver-kubelet-client.crt", + expectedSecretName: "apiserver-kubelet-client.crt", + }, + { + keyName: "etcd/ca.crt", + expectedSecretName: "etcd-ca.crt", + }, + { + keyName: "etcd/healthcheck-client.crt", + expectedSecretName: "etcd-healthcheck-client.crt", + }, + } + + for _, tc := range tests { + secretName := certOrKeyNameToSecretName(tc.keyName) + if secretName != tc.expectedSecretName { + t.Fatalf("secret name %s didn't match expected name %s", secretName, tc.expectedSecretName) + } + } +} diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index 88c7dd9b69..bd91dd56b1 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -23,6 +23,7 @@ go_library( "//cmd/kubeadm/app/images:go_default_library", 
"//cmd/kubeadm/app/util/runtime:go_default_library", "//cmd/kubeadm/app/util/system:go_default_library", + "//pkg/master/ports:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/util/initsystem:go_default_library", "//pkg/util/ipvs:go_default_library", diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 5509c7d80d..1b159b07ed 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -46,6 +46,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/images" utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime" "k8s.io/kubernetes/cmd/kubeadm/app/util/system" + "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/initsystem" ipvsutil "k8s.io/kubernetes/pkg/util/ipvs" @@ -882,10 +883,10 @@ func RunInitNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.InitConfigura checks := []Checker{ NumCPUCheck{NumCPU: kubeadmconstants.ControlPlaneNumCPU}, KubernetesVersionCheck{KubernetesVersion: cfg.KubernetesVersion, KubeadmVersion: kubeadmversion.Get().GitVersion}, - FirewalldCheck{ports: []int{int(cfg.LocalAPIEndpoint.BindPort), 10250}}, + FirewalldCheck{ports: []int{int(cfg.LocalAPIEndpoint.BindPort), ports.KubeletPort}}, PortOpenCheck{port: int(cfg.LocalAPIEndpoint.BindPort)}, - PortOpenCheck{port: 10251}, - PortOpenCheck{port: 10252}, + PortOpenCheck{port: ports.InsecureSchedulerPort}, + PortOpenCheck{port: ports.InsecureKubeControllerManagerPort}, FileAvailableCheck{Path: kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.KubeAPIServer, manifestsDir)}, FileAvailableCheck{Path: kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.KubeControllerManager, manifestsDir)}, FileAvailableCheck{Path: kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.KubeScheduler, manifestsDir)}, @@ -1037,7 +1038,7 @@ func addCommonChecks(execer utilsexec.Interface, cfg kubeadmapi.CommonConfigurat HostnameCheck{nodeName: cfg.GetNodeName()}, KubeletVersionCheck{KubernetesVersion: cfg.GetKubernetesVersion(), exec: execer}, ServiceCheck{Service: "kubelet", CheckIfActive: false}, - PortOpenCheck{port: 10250}) + PortOpenCheck{port: ports.KubeletPort}) return checks } diff --git a/cmd/kubeadm/app/util/apiclient/idempotency_test.go b/cmd/kubeadm/app/util/apiclient/idempotency_test.go index b4ac6a4363..d306f62268 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency_test.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency_test.go @@ -62,7 +62,7 @@ func TestPatchNodeNonErrorCases(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { client := fake.NewSimpleClientset() - _, err := client.Core().Nodes().Create(&tc.node) + _, err := client.CoreV1().Nodes().Create(&tc.node) if err != nil { t.Fatalf("failed to create node to fake client: %v", err) } diff --git a/cmd/kubeadm/app/util/config/initconfiguration_test.go b/cmd/kubeadm/app/util/config/initconfiguration_test.go index cc11b97921..93801a8c88 100644 --- a/cmd/kubeadm/app/util/config/initconfiguration_test.go +++ b/cmd/kubeadm/app/util/config/initconfiguration_test.go @@ -34,16 +34,16 @@ import ( ) const ( - masterV1alpha3YAML = "testdata/conversion/master/v1alpha3.yaml" - masterV1alpha3YAMLNonLinux = "testdata/conversion/master/v1alpha3_non_linux.yaml" - masterV1beta1YAML = "testdata/conversion/master/v1beta1.yaml" - masterV1beta1YAMLNonLinux = "testdata/conversion/master/v1beta1_non_linux.yaml" - masterInternalYAML = 
"testdata/conversion/master/internal.yaml" - masterInternalYAMLNonLinux = "testdata/conversion/master/internal_non_linux.yaml" - masterIncompleteYAML = "testdata/defaulting/master/incomplete.yaml" - masterDefaultedYAML = "testdata/defaulting/master/defaulted.yaml" - masterDefaultedYAMLNonLinux = "testdata/defaulting/master/defaulted_non_linux.yaml" - masterInvalidYAML = "testdata/validation/invalid_mastercfg.yaml" + controlPlaneV1alpha3YAML = "testdata/conversion/controlplane/v1alpha3.yaml" + controlPlaneV1alpha3YAMLNonLinux = "testdata/conversion/controlplane/v1alpha3_non_linux.yaml" + controlPlaneV1beta1YAML = "testdata/conversion/controlplane/v1beta1.yaml" + controlPlaneV1beta1YAMLNonLinux = "testdata/conversion/controlplane/v1beta1_non_linux.yaml" + controlPlaneInternalYAML = "testdata/conversion/controlplane/internal.yaml" + controlPlaneInternalYAMLNonLinux = "testdata/conversion/controlplane/internal_non_linux.yaml" + controlPlaneIncompleteYAML = "testdata/defaulting/controlplane/incomplete.yaml" + controlPlaneDefaultedYAML = "testdata/defaulting/controlplane/defaulted.yaml" + controlPlaneDefaultedYAMLNonLinux = "testdata/defaulting/controlplane/defaulted_non_linux.yaml" + controlPlaneInvalidYAML = "testdata/validation/invalid_controlplanecfg.yaml" ) func diff(expected, actual []byte) string { @@ -141,15 +141,15 @@ func TestLoadInitConfigurationFromFile(t *testing.T) { } func TestInitConfigurationMarshallingFromFile(t *testing.T) { - masterV1alpha3YAMLAbstracted := masterV1alpha3YAML - masterV1beta1YAMLAbstracted := masterV1beta1YAML - masterInternalYAMLAbstracted := masterInternalYAML - masterDefaultedYAMLAbstracted := masterDefaultedYAML + controlPlaneV1alpha3YAMLAbstracted := controlPlaneV1alpha3YAML + controlPlaneV1beta1YAMLAbstracted := controlPlaneV1beta1YAML + controlPlaneInternalYAMLAbstracted := controlPlaneInternalYAML + controlPlaneDefaultedYAMLAbstracted := controlPlaneDefaultedYAML if runtime.GOOS != "linux" { - masterV1alpha3YAMLAbstracted = masterV1alpha3YAMLNonLinux - masterV1beta1YAMLAbstracted = masterV1beta1YAMLNonLinux - masterInternalYAMLAbstracted = masterInternalYAMLNonLinux - masterDefaultedYAMLAbstracted = masterDefaultedYAMLNonLinux + controlPlaneV1alpha3YAMLAbstracted = controlPlaneV1alpha3YAMLNonLinux + controlPlaneV1beta1YAMLAbstracted = controlPlaneV1beta1YAMLNonLinux + controlPlaneInternalYAMLAbstracted = controlPlaneInternalYAMLNonLinux + controlPlaneDefaultedYAMLAbstracted = controlPlaneDefaultedYAMLNonLinux } var tests = []struct { @@ -161,32 +161,32 @@ func TestInitConfigurationMarshallingFromFile(t *testing.T) { // and then marshals the internal object to the expected groupVersion { // v1alpha3 -> internal name: "v1alpha3IsDeprecated", - in: masterV1alpha3YAMLAbstracted, + in: controlPlaneV1alpha3YAMLAbstracted, expectedErr: true, }, { // v1beta1 -> internal name: "v1beta1ToInternal", - in: masterV1beta1YAMLAbstracted, - out: masterInternalYAMLAbstracted, + in: controlPlaneV1beta1YAMLAbstracted, + out: controlPlaneInternalYAMLAbstracted, groupVersion: kubeadm.SchemeGroupVersion, }, { // v1beta1 -> internal -> v1beta1 name: "v1beta1Tov1beta1", - in: masterV1beta1YAMLAbstracted, - out: masterV1beta1YAMLAbstracted, + in: controlPlaneV1beta1YAMLAbstracted, + out: controlPlaneV1beta1YAMLAbstracted, groupVersion: kubeadmapiv1beta1.SchemeGroupVersion, }, // These tests are reading one file that has only a subset of the fields populated, loading it using LoadInitConfigurationFromFile, // and then marshals the internal object to the expected groupVersion { 
// v1beta1 -> default -> validate -> internal -> v1beta1 name: "incompleteYAMLToDefaultedv1beta1", - in: masterIncompleteYAML, - out: masterDefaultedYAMLAbstracted, + in: controlPlaneIncompleteYAML, + out: controlPlaneDefaultedYAMLAbstracted, groupVersion: kubeadmapiv1beta1.SchemeGroupVersion, }, { // v1beta1 -> validation should fail name: "invalidYAMLShouldFail", - in: masterInvalidYAML, + in: controlPlaneInvalidYAML, expectedErr: true, }, } diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/internal.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml rename to cmd/kubeadm/app/util/config/testdata/conversion/controlplane/internal.yaml index 1753225c3a..17891246e2 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/internal.yaml @@ -198,7 +198,7 @@ Networking: NodeRegistration: CRISocket: /var/run/dockershim.sock KubeletExtraArgs: null - Name: master-1 + Name: control-plane-1 Taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal_non_linux.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/internal_non_linux.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/conversion/master/internal_non_linux.yaml rename to cmd/kubeadm/app/util/config/testdata/conversion/controlplane/internal_non_linux.yaml index 438ba49899..b0393468ef 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal_non_linux.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/internal_non_linux.yaml @@ -197,7 +197,7 @@ Networking: NodeRegistration: CRISocket: /var/run/dockershim.sock KubeletExtraArgs: null - Name: master-1 + Name: control-plane-1 Taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1alpha3.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml rename to cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1alpha3.yaml index 27731830fe..1317a1c306 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1alpha3.yaml @@ -13,7 +13,7 @@ bootstrapTokens: kind: InitConfiguration nodeRegistration: criSocket: /var/run/dockershim.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3_non_linux.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1alpha3_non_linux.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3_non_linux.yaml rename to cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1alpha3_non_linux.yaml index 344e96e2db..eb91bf825d 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3_non_linux.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1alpha3_non_linux.yaml @@ -13,7 +13,7 @@ bootstrapTokens: kind: InitConfiguration nodeRegistration: criSocket: /var/run/dockershim.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule 
key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1beta1.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1.yaml rename to cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1beta1.yaml index 91d521cb89..9dd7570e4f 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1beta1.yaml @@ -13,7 +13,7 @@ localAPIEndpoint: bindPort: 6443 nodeRegistration: criSocket: /var/run/dockershim.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1_non_linux.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1beta1_non_linux.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1_non_linux.yaml rename to cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1beta1_non_linux.yaml index 6fcc9049a1..2b1d5293a9 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1beta1_non_linux.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/controlplane/v1beta1_non_linux.yaml @@ -13,7 +13,7 @@ localAPIEndpoint: bindPort: 6443 nodeRegistration: criSocket: /var/run/dockershim.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml index 66219eaabc..c83e611a10 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml @@ -15,7 +15,7 @@ Discovery: NodeRegistration: CRISocket: /var/run/dockershim.sock KubeletExtraArgs: null - Name: master-1 + Name: control-plane-1 Taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha3.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha3.yaml index a2aef71214..607e047a45 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha3.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha3.yaml @@ -13,7 +13,7 @@ discoveryTokenUnsafeSkipCAVerification: true kind: JoinConfiguration nodeRegistration: criSocket: /var/run/dockershim.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1beta1.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/node/v1beta1.yaml index 05ce7a0083..90bd743014 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1beta1.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/node/v1beta1.yaml @@ -14,7 +14,7 @@ discovery: kind: JoinConfiguration nodeRegistration: criSocket: /var/run/dockershim.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/defaulted.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml rename to 
cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/defaulted.yaml index 459029bf30..813b161c93 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/defaulted.yaml @@ -13,7 +13,7 @@ localAPIEndpoint: bindPort: 6443 nodeRegistration: criSocket: /var/run/criruntime.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_non_linux.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/defaulted_non_linux.yaml similarity index 99% rename from cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_non_linux.yaml rename to cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/defaulted_non_linux.yaml index 2edebfb2d7..c3124af1f7 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_non_linux.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/defaulted_non_linux.yaml @@ -13,7 +13,7 @@ localAPIEndpoint: bindPort: 6443 nodeRegistration: criSocket: /var/run/criruntime.sock - name: master-1 + name: control-plane-1 taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/incomplete.yaml similarity index 89% rename from cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml rename to cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/incomplete.yaml index a6ce8a5017..f945cec610 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/controlplane/incomplete.yaml @@ -6,7 +6,7 @@ localAPIEndpoint: advertiseAddress: 192.168.2.2 nodeRegistration: criSocket: /var/run/criruntime.sock - name: master-1 + name: control-plane-1 --- apiVersion: kubeadm.k8s.io/v1beta1 certificatesDir: /var/lib/kubernetes/pki diff --git a/cmd/kubeadm/app/util/config/testdata/validation/invalid_mastercfg.yaml b/cmd/kubeadm/app/util/config/testdata/validation/invalid_controlplanecfg.yaml similarity index 100% rename from cmd/kubeadm/app/util/config/testdata/validation/invalid_mastercfg.yaml rename to cmd/kubeadm/app/util/config/testdata/validation/invalid_controlplanecfg.yaml diff --git a/hack/.golint_failures b/hack/.golint_failures index 36e4a37e05..c028ddbbcc 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -225,14 +225,11 @@ pkg/quota/v1/evaluator/core pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage pkg/registry/admissionregistration/rest pkg/registry/admissionregistration/validatingwebhookconfiguration/storage -pkg/registry/apps/daemonset pkg/registry/apps/daemonset/storage -pkg/registry/apps/deployment pkg/registry/apps/deployment/storage pkg/registry/apps/replicaset pkg/registry/apps/replicaset/storage pkg/registry/apps/rest -pkg/registry/apps/statefulset pkg/registry/apps/statefulset/storage pkg/registry/auditregistration/rest pkg/registry/authentication/rest @@ -241,10 +238,8 @@ pkg/registry/authorization/localsubjectaccessreview pkg/registry/authorization/rest pkg/registry/authorization/selfsubjectaccessreview pkg/registry/authorization/subjectaccessreview -pkg/registry/autoscaling/horizontalpodautoscaler pkg/registry/autoscaling/horizontalpodautoscaler/storage pkg/registry/autoscaling/rest -pkg/registry/batch/cronjob 
pkg/registry/batch/cronjob/storage pkg/registry/batch/job pkg/registry/batch/job/storage @@ -271,7 +266,6 @@ pkg/registry/core/pod/rest pkg/registry/core/podtemplate/storage pkg/registry/core/replicationcontroller pkg/registry/core/replicationcontroller/storage -pkg/registry/core/resourcequota pkg/registry/core/resourcequota/storage pkg/registry/core/rest pkg/registry/core/secret @@ -289,7 +283,6 @@ pkg/registry/extensions/controller/storage pkg/registry/extensions/rest pkg/registry/networking/networkpolicy/storage pkg/registry/networking/rest -pkg/registry/policy/poddisruptionbudget pkg/registry/policy/poddisruptionbudget/storage pkg/registry/policy/rest pkg/registry/rbac/clusterrole @@ -650,29 +643,24 @@ test/e2e/apps test/e2e/auth test/e2e/autoscaling test/e2e/chaosmonkey -test/e2e/cloud test/e2e/common test/e2e/framework -test/e2e/framework/ingress test/e2e/framework/providers/gce test/e2e/framework/providers/kubemark test/e2e/instrumentation test/e2e/instrumentation/logging test/e2e/instrumentation/monitoring -test/e2e/kubectl test/e2e/lifecycle test/e2e/lifecycle/bootstrap test/e2e/network test/e2e/node test/e2e/scalability test/e2e/scheduling -test/e2e/servicecatalog test/e2e/storage test/e2e/storage/drivers test/e2e/storage/testsuites test/e2e/storage/utils test/e2e/storage/vsphere -test/e2e/ui test/e2e/windows test/e2e_kubeadm test/e2e_node diff --git a/hack/testdata/kustomize/configmap.yaml b/hack/testdata/kustomize/configmap.yaml new file mode 100644 index 0000000000..e335ab8cc8 --- /dev/null +++ b/hack/testdata/kustomize/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: the-map +data: + altGreeting: "Good Morning!" + enableRisky: "false" diff --git a/hack/testdata/kustomize/deployment.yaml b/hack/testdata/kustomize/deployment.yaml new file mode 100644 index 0000000000..13c096f487 --- /dev/null +++ b/hack/testdata/kustomize/deployment.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: the-deployment + labels: + deployment: hello +spec: + replicas: 3 + selector: + matchLabels: + deployment: hello + template: + metadata: + labels: + deployment: hello + spec: + containers: + - name: the-container + image: monopole/hello:1 + command: ["/hello", + "--port=8080", + "--enableRiskyFeature=$(ENABLE_RISKY)"] + ports: + - containerPort: 8080 + env: + - name: ALT_GREETING + valueFrom: + configMapKeyRef: + name: the-map + key: altGreeting + - name: ENABLE_RISKY + valueFrom: + configMapKeyRef: + name: the-map + key: enableRisky diff --git a/hack/testdata/kustomize/kustomization.yaml b/hack/testdata/kustomize/kustomization.yaml new file mode 100644 index 0000000000..b680da17c6 --- /dev/null +++ b/hack/testdata/kustomize/kustomization.yaml @@ -0,0 +1,5 @@ +nameprefix: test- +resources: +- deployment.yaml +- service.yaml +- configmap.yaml diff --git a/hack/testdata/kustomize/service.yaml b/hack/testdata/kustomize/service.yaml new file mode 100644 index 0000000000..e238f70021 --- /dev/null +++ b/hack/testdata/kustomize/service.yaml @@ -0,0 +1,12 @@ +kind: Service +apiVersion: v1 +metadata: + name: the-service +spec: + selector: + deployment: hello + type: LoadBalancer + ports: + - protocol: TCP + port: 8666 + targetPort: 8080 diff --git a/pkg/.import-restrictions b/pkg/.import-restrictions index 77cc61216c..3a1985a20e 100644 --- a/pkg/.import-restrictions +++ b/pkg/.import-restrictions @@ -6,7 +6,8 @@ "" ], "ForbiddenPrefixes": [ - "k8s.io/kubernetes/cmd" + "k8s.io/kubernetes/cmd", + "github.com/ghodss/yaml" ] } ] diff --git 
a/pkg/BUILD b/pkg/BUILD index 04431003b8..c05c2b23db 100644 --- a/pkg/BUILD +++ b/pkg/BUILD @@ -24,7 +24,6 @@ filegroup( "//pkg/api/testapi:all-srcs", "//pkg/api/testing:all-srcs", "//pkg/api/v1/endpoints:all-srcs", - "//pkg/api/v1/node:all-srcs", "//pkg/api/v1/persistentvolume:all-srcs", "//pkg/api/v1/pod:all-srcs", "//pkg/api/v1/resource:all-srcs", diff --git a/pkg/api/v1/node/BUILD b/pkg/api/v1/node/BUILD deleted file mode 100644 index de080f93ee..0000000000 --- a/pkg/api/v1/node/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importpath = "k8s.io/kubernetes/pkg/api/v1/node", - deps = ["//staging/src/k8s.io/api/core/v1:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/apis/admissionregistration/fuzzer/fuzzer.go b/pkg/apis/admissionregistration/fuzzer/fuzzer.go index ba4f2c5e5b..c68785abaa 100644 --- a/pkg/apis/admissionregistration/fuzzer/fuzzer.go +++ b/pkg/apis/admissionregistration/fuzzer/fuzzer.go @@ -32,6 +32,10 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} { obj.FailurePolicy = &p s := admissionregistration.SideEffectClassUnknown obj.SideEffects = &s + if obj.TimeoutSeconds == nil { + i := int32(30) + obj.TimeoutSeconds = &i + } }, } } diff --git a/pkg/apis/admissionregistration/types.go b/pkg/apis/admissionregistration/types.go index 9d4eb3813c..b95a14b854 100644 --- a/pkg/apis/admissionregistration/types.go +++ b/pkg/apis/admissionregistration/types.go @@ -208,6 +208,13 @@ type Webhook struct { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional SideEffects *SideEffectClass + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // +optional + TimeoutSeconds *int32 } // RuleWithOperations is a tuple of Operations and Resources. 
It is recommended to make diff --git a/pkg/apis/admissionregistration/v1beta1/defaults.go b/pkg/apis/admissionregistration/v1beta1/defaults.go index fa35267624..81decaae25 100644 --- a/pkg/apis/admissionregistration/v1beta1/defaults.go +++ b/pkg/apis/admissionregistration/v1beta1/defaults.go @@ -40,4 +40,8 @@ func SetDefaults_Webhook(obj *admissionregistrationv1beta1.Webhook) { unknown := admissionregistrationv1beta1.SideEffectClassUnknown obj.SideEffects = &unknown } + if obj.TimeoutSeconds == nil { + obj.TimeoutSeconds = new(int32) + *obj.TimeoutSeconds = 30 + } } diff --git a/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go b/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go index 3d1d1be717..bbc12e8d9e 100644 --- a/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go @@ -301,6 +301,7 @@ func autoConvert_v1beta1_Webhook_To_admissionregistration_Webhook(in *v1beta1.We out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) out.SideEffects = (*admissionregistration.SideEffectClass)(unsafe.Pointer(in.SideEffects)) + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) return nil } @@ -318,6 +319,7 @@ func autoConvert_admissionregistration_Webhook_To_v1beta1_Webhook(in *admissionr out.FailurePolicy = (*v1beta1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) out.SideEffects = (*v1beta1.SideEffectClass)(unsafe.Pointer(in.SideEffects)) + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) return nil } diff --git a/pkg/apis/admissionregistration/validation/validation.go b/pkg/apis/admissionregistration/validation/validation.go index 09200fb2bc..9cd0da0d9e 100644 --- a/pkg/apis/admissionregistration/validation/validation.go +++ b/pkg/apis/admissionregistration/validation/validation.go @@ -171,6 +171,9 @@ func validateWebhook(hook *admissionregistration.Webhook, fldPath *field.Path) f if hook.SideEffects != nil && !supportedSideEffectClasses.Has(string(*hook.SideEffects)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("sideEffects"), *hook.SideEffects, supportedSideEffectClasses.List())) } + if hook.TimeoutSeconds != nil && (*hook.TimeoutSeconds > 30 || *hook.TimeoutSeconds < 1) { + allErrors = append(allErrors, field.Invalid(fldPath.Child("timeoutSeconds"), *hook.TimeoutSeconds, "the timeout value must be between 1 and 30 seconds")) + } if hook.NamespaceSelector != nil { allErrors = append(allErrors, metav1validation.ValidateLabelSelector(hook.NamespaceSelector, fldPath.Child("namespaceSelector"))...) 
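Taken together, the hunks above give the new field a precise contract: a nil TimeoutSeconds is defaulted to 30 by SetDefaults_Webhook, and validateWebhook rejects explicit values outside [1, 30]. From the client side, setting the knob on the v1beta1 type looks roughly like this (webhook name and value are illustrative; required fields such as ClientConfig are omitted for brevity):

package webhooksketch

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// exampleWebhook builds a webhook entry with an explicit timeout. Leaving
// TimeoutSeconds nil lets defaulting fill in 30; values outside 1..30 fail
// the validation added above.
func exampleWebhook() admissionregistrationv1beta1.Webhook {
	timeout := int32(10) // ignore the call or fail the request after 10s, per the failure policy
	return admissionregistrationv1beta1.Webhook{
		Name:           "example.webhook.k8s.io",
		TimeoutSeconds: &timeout,
	}
}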
diff --git a/pkg/apis/admissionregistration/validation/validation_test.go b/pkg/apis/admissionregistration/validation/validation_test.go index 9a51b6631b..90d31fd419 100644 --- a/pkg/apis/admissionregistration/validation/validation_test.go +++ b/pkg/apis/admissionregistration/validation/validation_test.go @@ -26,6 +26,8 @@ import ( func strPtr(s string) *string { return &s } +func int32Ptr(i int32) *int32 { return &i } + func newValidatingWebhookConfiguration(hooks []admissionregistration.Webhook) *admissionregistration.ValidatingWebhookConfiguration { return &admissionregistration.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ @@ -544,6 +546,63 @@ func TestValidateValidatingWebhookConfiguration(t *testing.T) { }), expectedError: `clientConfig.service.path: Invalid value: "/apis/foo.bar/v1alpha1/--bad": segment[3]: a DNS-1123 subdomain`, }, + { + name: "timeout seconds cannot be greater than 30", + config: newValidatingWebhookConfiguration( + []admissionregistration.Webhook{ + { + Name: "webhook.k8s.io", + ClientConfig: validClientConfig, + TimeoutSeconds: int32Ptr(31), + }, + }), + expectedError: `webhooks[0].timeoutSeconds: Invalid value: 31: the timeout value must be between 1 and 30 seconds`, + }, + { + name: "timeout seconds cannot be smaller than 1", + config: newValidatingWebhookConfiguration( + []admissionregistration.Webhook{ + { + Name: "webhook.k8s.io", + ClientConfig: validClientConfig, + TimeoutSeconds: int32Ptr(0), + }, + }), + expectedError: `webhooks[0].timeoutSeconds: Invalid value: 0: the timeout value must be between 1 and 30 seconds`, + }, + { + name: "timeout seconds must be positive", + config: newValidatingWebhookConfiguration( + []admissionregistration.Webhook{ + { + Name: "webhook.k8s.io", + ClientConfig: validClientConfig, + TimeoutSeconds: int32Ptr(-1), + }, + }), + expectedError: `webhooks[0].timeoutSeconds: Invalid value: -1: the timeout value must be between 1 and 30 seconds`, + }, + { + name: "valid timeout seconds", + config: newValidatingWebhookConfiguration( + []admissionregistration.Webhook{ + { + Name: "webhook.k8s.io", + ClientConfig: validClientConfig, + TimeoutSeconds: int32Ptr(1), + }, + { + Name: "webhook2.k8s.io", + ClientConfig: validClientConfig, + TimeoutSeconds: int32Ptr(15), + }, + { + Name: "webhook3.k8s.io", + ClientConfig: validClientConfig, + TimeoutSeconds: int32Ptr(30), + }, + }), + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/apis/admissionregistration/zz_generated.deepcopy.go b/pkg/apis/admissionregistration/zz_generated.deepcopy.go index e5d2a43667..1f9b365c5f 100644 --- a/pkg/apis/admissionregistration/zz_generated.deepcopy.go +++ b/pkg/apis/admissionregistration/zz_generated.deepcopy.go @@ -257,6 +257,11 @@ func (in *Webhook) DeepCopyInto(out *Webhook) { *out = new(SideEffectClass) **out = **in } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } return } diff --git a/pkg/apis/policy/validation/validation_test.go b/pkg/apis/policy/validation/validation_test.go index b36f03d01a..c17d24b5c1 100644 --- a/pkg/apis/policy/validation/validation_test.go +++ b/pkg/apis/policy/validation/validation_test.go @@ -906,3 +906,34 @@ func TestValidatePSPRunAsGroup(t *testing.T) { }) } } + +func TestValidatePSPSELinux(t *testing.T) { + var testCases = []struct { + name string + selinux policy.SELinuxStrategyOptions + fail bool + }{ + {"SELinuxStrategyMustRunAs", + policy.SELinuxStrategyOptions{ + Rule: 
policy.SELinuxStrategyMustRunAs, + SELinuxOptions: &api.SELinuxOptions{Level: "s9:z0,z1"}}, false}, + {"SELinuxStrategyMustRunAs", + policy.SELinuxStrategyOptions{ + Rule: policy.SELinuxStrategyMustRunAs, + SELinuxOptions: &api.SELinuxOptions{Level: "s0"}}, false}, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + errList := validatePSPSELinux(field.NewPath("Status"), &testCase.selinux) + actualErrors := len(errList) + expectedErrors := 1 + if !testCase.fail { + expectedErrors = 0 + } + if actualErrors != expectedErrors { + t.Errorf("In testCase %v, expected %v errors, got %v errors", testCase.name, expectedErrors, actualErrors) + } + }) + } + +} diff --git a/pkg/client/tests/listwatch_test.go b/pkg/client/tests/listwatch_test.go index 35a8a256e4..1adaf67adb 100644 --- a/pkg/client/tests/listwatch_test.go +++ b/pkg/client/tests/listwatch_test.go @@ -17,6 +17,7 @@ limitations under the License. package tests import ( + "context" "net/http/httptest" "net/url" "testing" @@ -194,11 +195,20 @@ func (w lw) Watch(options metav1.ListOptions) (watch.Interface, error) { func TestListWatchUntil(t *testing.T) { fw := watch.NewFake() go func() { - var obj *v1.Pod + obj := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "2", + }, + } fw.Modify(obj) }() listwatch := lw{ - list: &v1.PodList{Items: []v1.Pod{{}}}, + list: &v1.PodList{ + ListMeta: metav1.ListMeta{ + ResourceVersion: "1", + }, + Items: []v1.Pod{{}}, + }, watch: fw, } @@ -213,8 +223,9 @@ func TestListWatchUntil(t *testing.T) { }, } - timeout := 10 * time.Second - lastEvent, err := watchtools.ListWatchUntil(timeout, listwatch, conditions...) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + lastEvent, err := watchtools.ListWatchUntil(ctx, listwatch, conditions...) 
if err != nil { t.Fatalf("expected nil error, got %#v", err) } diff --git a/pkg/controller/.import-restrictions b/pkg/controller/.import-restrictions index 6000753734..e3b8394d05 100644 --- a/pkg/controller/.import-restrictions +++ b/pkg/controller/.import-restrictions @@ -167,7 +167,6 @@ "AllowedPrefixes": [ "k8s.io/kubernetes/pkg/api/legacyscheme", "k8s.io/kubernetes/pkg/api/v1/endpoints", - "k8s.io/kubernetes/pkg/api/v1/node", "k8s.io/kubernetes/pkg/api/v1/pod", "k8s.io/kubernetes/pkg/apis/apps/v1", "k8s.io/kubernetes/pkg/apis/autoscaling", diff --git a/pkg/controller/certificates/certificate_controller_test.go b/pkg/controller/certificates/certificate_controller_test.go index 8805fbfcef..daf4689a42 100644 --- a/pkg/controller/certificates/certificate_controller_test.go +++ b/pkg/controller/certificates/certificate_controller_test.go @@ -47,7 +47,7 @@ func TestCertificateController(t *testing.T) { Reason: "test reason", Message: "test message", }) - _, err := client.Certificates().CertificateSigningRequests().UpdateApproval(csr) + _, err := client.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr) if err != nil { return err } diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 1997df8099..327a8ab741 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -15,9 +15,9 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/controller/cloud", deps = [ - "//pkg/api/v1/node:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/controller:go_default_library", + "//pkg/controller/util/node:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/util/node:go_default_library", diff --git a/pkg/controller/cloud/node_lifecycle_controller.go b/pkg/controller/cloud/node_lifecycle_controller.go index 809e85c55d..c75c9e801e 100644 --- a/pkg/controller/cloud/node_lifecycle_controller.go +++ b/pkg/controller/cloud/node_lifecycle_controller.go @@ -35,8 +35,8 @@ import ( "k8s.io/client-go/tools/record" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" - nodeutilv1 "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/controller" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) @@ -133,7 +133,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() { for _, node := range nodes { // Default NodeReady status to v1.ConditionUnknown status := v1.ConditionUnknown - if _, c := nodeutilv1.GetNodeCondition(&node.Status, v1.NodeReady); c != nil { + if _, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil { status = c.Status } diff --git a/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go b/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go index ae9770cad0..e7ca633351 100644 --- a/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go +++ b/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go @@ -66,7 +66,7 @@ func TestFinalizeNamespaceFunc(t *testing.T) { }, } d := namespacedResourcesDeleter{ - nsClient: mockClient.Core().Namespaces(), + nsClient: mockClient.CoreV1().Namespaces(), finalizerToken: v1.FinalizerKubernetes, } d.finalizeNamespace(testNamespace) @@ -180,7 +180,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *metav1.APIVersio fn := func() ([]*metav1.APIResourceList, error) { return resources, nil } - d := NewNamespacedResourcesDeleter(mockClient.Core().Namespaces(), dynamicClient, 
mockClient.Core(), fn, v1.FinalizerKubernetes, true) + d := NewNamespacedResourcesDeleter(mockClient.CoreV1().Namespaces(), dynamicClient, mockClient.CoreV1(), fn, v1.FinalizerKubernetes, true) if err := d.Delete(testInput.testNamespace.Name); err != nil { t.Errorf("scenario %s - Unexpected error when synching namespace %v", scenario, err) } @@ -219,7 +219,7 @@ func TestRetryOnConflictError(t *testing.T) { } namespace := &v1.Namespace{} d := namespacedResourcesDeleter{ - nsClient: mockClient.Core().Namespaces(), + nsClient: mockClient.CoreV1().Namespaces(), } _, err := d.retryOnConflictError(namespace, retryOnce) if err != nil { @@ -255,7 +255,7 @@ func TestSyncNamespaceThatIsActive(t *testing.T) { fn := func() ([]*metav1.APIResourceList, error) { return testResources(), nil } - d := NewNamespacedResourcesDeleter(mockClient.Core().Namespaces(), nil, mockClient.Core(), + d := NewNamespacedResourcesDeleter(mockClient.CoreV1().Namespaces(), nil, mockClient.CoreV1(), fn, v1.FinalizerKubernetes, true) err := d.Delete(testNamespace.Name) if err != nil { diff --git a/pkg/controller/nodeipam/ipam/BUILD b/pkg/controller/nodeipam/ipam/BUILD index e95fd1f7c9..d7dd6e6e1e 100644 --- a/pkg/controller/nodeipam/ipam/BUILD +++ b/pkg/controller/nodeipam/ipam/BUILD @@ -42,7 +42,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam", deps = [ - "//pkg/api/v1/node:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/nodeipam/ipam/cidrset:go_default_library", diff --git a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go index 8c39a9f469..2102255571 100644 --- a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go @@ -39,7 +39,6 @@ import ( "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" cloudprovider "k8s.io/cloud-provider" - v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/controller" nodeutil "k8s.io/kubernetes/pkg/controller/util/node" @@ -118,7 +117,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter // Even if PodCIDR is assigned, but NetworkUnavailable condition is // set to true, we need to process the node to set the condition. 
networkUnavailableTaint := &v1.Taint{Key: schedulerapi.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule} - _, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable) + _, cond := nodeutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable) if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) { return ca.AllocateOrOccupyCIDR(newNode) } diff --git a/pkg/controller/nodelifecycle/BUILD b/pkg/controller/nodelifecycle/BUILD index a7935ff790..0dfe54eede 100644 --- a/pkg/controller/nodelifecycle/BUILD +++ b/pkg/controller/nodelifecycle/BUILD @@ -9,7 +9,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/controller/nodelifecycle", visibility = ["//visibility:public"], deps = [ - "//pkg/api/v1/node:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/nodelifecycle/scheduler:go_default_library", "//pkg/controller/util/node:go_default_library", diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index 87796c5757..9359a611b6 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -52,7 +52,6 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" - v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler" nodeutil "k8s.io/kubernetes/pkg/controller/util/node" @@ -525,7 +524,7 @@ func (nc *Controller) doNoExecuteTaintingPass() { // retry in 50 millisecond return false, 50 * time.Millisecond } - _, condition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) // Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive. taintToAdd := v1.Taint{} oppositeTaint := v1.Taint{} @@ -742,7 +741,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node var err error var gracePeriod time.Duration var observedReadyCondition v1.NodeCondition - _, currentReadyCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) + _, currentReadyCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if currentReadyCondition == nil { // If ready condition is nil, then kubelet (or nodecontroller) never posted node status. // A fake ready condition is created, where LastHeartbeatTime and LastTransitionTime is set @@ -787,10 +786,10 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node var savedCondition *v1.NodeCondition var savedLease *coordv1beta1.Lease if found { - _, savedCondition = v1node.GetNodeCondition(savedNodeHealth.status, v1.NodeReady) + _, savedCondition = nodeutil.GetNodeCondition(savedNodeHealth.status, v1.NodeReady) savedLease = savedNodeHealth.lease } - _, observedCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) + _, observedCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if !found { klog.Warningf("Missing timestamp for Node %s. 
Assuming now as a timestamp.", node.Name) savedNodeHealth = &nodeHealthData{ @@ -885,7 +884,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node nowTimestamp := nc.now() for _, nodeConditionType := range remainingNodeConditionTypes { - _, currentCondition := v1node.GetNodeCondition(&node.Status, nodeConditionType) + _, currentCondition := nodeutil.GetNodeCondition(&node.Status, nodeConditionType) if currentCondition == nil { klog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name) node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{ @@ -908,7 +907,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node } } - _, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) + _, currentCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) { if _, err = nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil { klog.Errorf("Error updating node %s: %v", node.Name, err) diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index 406c77ffff..524f6e7c8a 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -665,9 +665,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform defaultDownscalestabilizationWindow := 5 * time.Minute hpaController := NewHorizontalController( - eventClient.Core(), + eventClient.CoreV1(), testScaleClient, - testClient.Autoscaling(), + testClient.AutoscalingV1(), testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme), metricsClient, informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(), diff --git a/pkg/controller/podautoscaler/legacy_horizontal_test.go b/pkg/controller/podautoscaler/legacy_horizontal_test.go index 4f97232a52..dfcda2e294 100644 --- a/pkg/controller/podautoscaler/legacy_horizontal_test.go +++ b/pkg/controller/podautoscaler/legacy_horizontal_test.go @@ -494,9 +494,9 @@ func (tc *legacyTestCase) runTest(t *testing.T) { defaultDownscaleStabilisationWindow := 5 * time.Minute hpaController := NewHorizontalController( - eventClient.Core(), + eventClient.CoreV1(), testScaleClient, - testClient.Autoscaling(), + testClient.AutoscalingV1(), testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme), metricsClient, informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(), diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index 278601d0ef..4c98ef4d88 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -672,7 +672,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error") }) - fakeRSClient := fakeClient.Apps().ReplicaSets("default") + fakeRSClient := fakeClient.AppsV1().ReplicaSets("default") numReplicas := int32(10) newStatus := apps.ReplicaSetStatus{Replicas: numReplicas} updateReplicaSetStatus(fakeRSClient, rs, newStatus) diff --git a/pkg/controller/route/BUILD b/pkg/controller/route/BUILD index b756e1bc20..f74931ae35 100644 --- a/pkg/controller/route/BUILD +++ b/pkg/controller/route/BUILD @@ -14,8 +14,8 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/controller/route", deps = [ - 
"//pkg/api/v1/node:go_default_library", "//pkg/controller:go_default_library", + "//pkg/controller/util/node:go_default_library", "//pkg/util/metrics:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -42,9 +42,9 @@ go_test( srcs = ["route_controller_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/api/v1/node:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/controller:go_default_library", + "//pkg/controller/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/controller/route/route_controller.go b/pkg/controller/route/route_controller.go index a8fd29e39c..27719b178e 100644 --- a/pkg/controller/route/route_controller.go +++ b/pkg/controller/route/route_controller.go @@ -40,10 +40,10 @@ import ( "k8s.io/client-go/tools/record" clientretry "k8s.io/client-go/util/retry" cloudprovider "k8s.io/cloud-provider" - v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/controller" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" "k8s.io/kubernetes/pkg/util/metrics" - nodeutil "k8s.io/kubernetes/pkg/util/node" + utilnode "k8s.io/kubernetes/pkg/util/node" ) const ( @@ -201,7 +201,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R }(nodeName, nameHint, route) } else { // Update condition only if it doesn't reflect the current state. - _, condition := v1node.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) if condition == nil || condition.Status != v1.ConditionFalse { rc.updateNetworkingCondition(types.NodeName(node.Name), true) } @@ -237,7 +237,7 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro // patch in the retry loop. 
currentTime := metav1.Now() if routeCreated { - err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{ + err = utilnode.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{ Type: v1.NodeNetworkUnavailable, Status: v1.ConditionFalse, Reason: "RouteCreated", @@ -245,7 +245,7 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro LastTransitionTime: currentTime, }) } else { - err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{ + err = utilnode.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{ Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue, Reason: "NoRouteCreated", diff --git a/pkg/controller/route/route_controller_test.go b/pkg/controller/route/route_controller_test.go index b7ea9bd770..4283779546 100644 --- a/pkg/controller/route/route_controller_test.go +++ b/pkg/controller/route/route_controller_test.go @@ -29,9 +29,9 @@ import ( "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" cloudprovider "k8s.io/cloud-provider" - nodeutil "k8s.io/kubernetes/pkg/api/v1/node" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" "k8s.io/kubernetes/pkg/controller" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" ) func alwaysReady() bool { return true } diff --git a/pkg/controller/testutil/test_utils.go b/pkg/controller/testutil/test_utils.go index 069c2d61a8..be13e4637a 100644 --- a/pkg/controller/testutil/test_utils.go +++ b/pkg/controller/testutil/test_utils.go @@ -96,7 +96,7 @@ func (m *FakeNodeHandler) GetUpdatedNodesCopy() []*v1.Node { // Core returns fake CoreInterface. func (m *FakeNodeHandler) Core() v1core.CoreV1Interface { - return &FakeLegacyHandler{m.Clientset.Core(), m} + return &FakeLegacyHandler{m.Clientset.CoreV1(), m} } // CoreV1 returns fake CoreV1Interface diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go index 7d67118cc6..c4e8b31604 100644 --- a/pkg/controller/util/node/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -260,3 +260,17 @@ func CreateDeleteNodeHandler(f func(node *v1.Node) error) func(obj interface{}) } } } + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index ec66fab1f8..c197ae2dd8 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -156,7 +156,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 stopCh := make(chan struct{}) - pods, err := fakeKubeClient.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{}) + pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. 
Expected: Actual: %v", err) } @@ -166,7 +166,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 podInformer.GetIndexer().Add(&podToAdd) podsNum++ } - nodes, err := fakeKubeClient.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := fakeKubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) } diff --git a/pkg/kubectl/cmd/annotate/annotate.go b/pkg/kubectl/cmd/annotate/annotate.go index 860cc10050..fe8d15dc92 100644 --- a/pkg/kubectl/cmd/annotate/annotate.go +++ b/pkg/kubectl/cmd/annotate/annotate.go @@ -208,7 +208,7 @@ func (o AnnotateOptions) Validate() error { if o.all && len(o.fieldSelector) > 0 { return fmt.Errorf("cannot set --all and --field-selector at the same time") } - if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { return fmt.Errorf("one or more resources must be specified as or /") } if len(o.newAnnotations) < 1 && len(o.removeAnnotations) < 1 { diff --git a/pkg/kubectl/cmd/apply/apply.go b/pkg/kubectl/cmd/apply/apply.go index 11ed554822..99393101b1 100644 --- a/pkg/kubectl/cmd/apply/apply.go +++ b/pkg/kubectl/cmd/apply/apply.go @@ -115,6 +115,9 @@ var ( # Apply the configuration in pod.json to a pod. kubectl apply -f ./pod.json + # Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml. + kubectl apply -k dir/ + # Apply the JSON passed into stdin to a pod. cat pod.json | kubectl apply -f - @@ -152,7 +155,7 @@ func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions o.cmdBaseName = baseName cmd := &cobra.Command{ - Use: "apply -f FILENAME", + Use: "apply (-f FILENAME | -k DIRECTORY)", DisableFlagsInUseLine: true, Short: i18n.T("Apply a configuration to a resource by filename or stdin"), Long: applyLong, @@ -170,7 +173,6 @@ func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions o.RecordFlags.AddFlags(cmd) o.PrintFlags.AddFlags(cmd) - cmd.MarkFlagRequired("filename") cmd.Flags().BoolVar(&o.Overwrite, "overwrite", o.Overwrite, "Automatically resolve conflicts between the modified and live configuration by using values from the modified configuration") cmd.Flags().BoolVar(&o.Prune, "prune", o.Prune, "Automatically delete resource objects, including the uninitialized ones, that do not appear in the configs and are created by either apply or create --save-config. 
Should be used with either -l or --all.") cmdutil.AddValidateFlags(cmd) @@ -237,6 +239,10 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } o.DeleteOptions = o.DeleteFlags.ToOptions(dynamicClient, o.IOStreams) + err = o.DeleteOptions.FilenameOptions.RequireFilenameOrKustomize() + if err != nil { + return err + } o.OpenAPISchema, _ = f.OpenAPISchema() o.Validator, err = f.Validator(cmdutil.GetFlagBool(cmd, "validate")) diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index 02ccca5a3f..f253121e71 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -107,13 +107,16 @@ func NewCmdReconcile(f cmdutil.Factory, streams genericclioptions.IOStreams) *co cmd.Flags().BoolVar(&o.DryRun, "dry-run", o.DryRun, "If true, display results but do not submit changes") cmd.Flags().BoolVar(&o.RemoveExtraPermissions, "remove-extra-permissions", o.RemoveExtraPermissions, "If true, removes extra permissions added to roles") cmd.Flags().BoolVar(&o.RemoveExtraSubjects, "remove-extra-subjects", o.RemoveExtraSubjects, "If true, removes extra subjects added to rolebindings") - cmd.MarkFlagRequired("filename") return cmd } // Complete completes all the required options func (o *ReconcileOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args []string) error { + if err := o.FilenameOptions.RequireFilenameOrKustomize(); err != nil { + return err + } + if len(args) > 0 { return errors.New("no arguments are allowed") } diff --git a/pkg/kubectl/cmd/autoscale/BUILD b/pkg/kubectl/cmd/autoscale/BUILD index fdb1dce6c2..f5b62322f6 100644 --- a/pkg/kubectl/cmd/autoscale/BUILD +++ b/pkg/kubectl/cmd/autoscale/BUILD @@ -10,7 +10,6 @@ go_library( "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/generate:go_default_library", "//pkg/kubectl/generate/versioned:go_default_library", - "//pkg/kubectl/polymorphichelpers:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util/i18n:go_default_library", "//pkg/kubectl/util/templates:go_default_library", @@ -20,6 +19,7 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library", + "//staging/src/k8s.io/client-go/scale:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/pkg/kubectl/cmd/autoscale/autoscale.go b/pkg/kubectl/cmd/autoscale/autoscale.go index d26a52b559..7c1cbbd46d 100644 --- a/pkg/kubectl/cmd/autoscale/autoscale.go +++ b/pkg/kubectl/cmd/autoscale/autoscale.go @@ -28,11 +28,11 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" autoscalingv1client "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/generate" generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" - "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/util/templates" @@ -74,10 +74,10 @@ type AutoscaleOptions struct { namespace string dryRun bool builder *resource.Builder - canBeAutoscaled polymorphichelpers.CanBeAutoscaledFunc generatorFunc func(string, 
*meta.RESTMapping) (generate.StructuredGenerator, error) - HPAClient autoscalingv1client.HorizontalPodAutoscalersGetter + HPAClient autoscalingv1client.HorizontalPodAutoscalersGetter + scaleKindResolver scale.ScaleKindResolver genericclioptions.IOStreams } @@ -133,7 +133,11 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args o.dryRun = cmdutil.GetFlagBool(cmd, "dry-run") o.createAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) o.builder = f.NewBuilder() - o.canBeAutoscaled = polymorphichelpers.CanBeAutoscaledFn + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.scaleKindResolver = scale.NewDiscoveryScaleKindResolver(discoveryClient) o.args = args o.RecordFlags.Complete(cmd) @@ -196,7 +200,7 @@ func (o *AutoscaleOptions) Validate() error { func (o *AutoscaleOptions) Run() error { r := o.builder. - WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). + Unstructured(). ContinueOnError(). NamespaceParam(o.namespace).DefaultNamespace(). FilenameParam(o.enforceNamespace, o.FilenameOptions). @@ -214,8 +218,9 @@ func (o *AutoscaleOptions) Run() error { } mapping := info.ResourceMapping() - if err := o.canBeAutoscaled(mapping.GroupVersionKind.GroupKind()); err != nil { - return err + gvr := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource.Resource) + if _, err := o.scaleKindResolver.ScaleForResource(gvr); err != nil { + return fmt.Errorf("cannot autoscale a %v: %v", mapping.GroupVersionKind.Kind, err) } generator, err := o.generatorFunc(info.Name, mapping) diff --git a/pkg/kubectl/cmd/certificates/certificates.go b/pkg/kubectl/cmd/certificates/certificates.go index fb537f2dd6..bd8d370763 100644 --- a/pkg/kubectl/cmd/certificates/certificates.go +++ b/pkg/kubectl/cmd/certificates/certificates.go @@ -104,7 +104,7 @@ func (o *CertificateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg } func (o *CertificateOptions) Validate() error { - if len(o.csrNames) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(o.csrNames) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { return fmt.Errorf("one or more CSRs must be specified as <name> or -f <filename>") } return nil } diff --git a/pkg/kubectl/cmd/convert/convert.go b/pkg/kubectl/cmd/convert/convert.go index 506b736577..b34a4aff91 100644 --- a/pkg/kubectl/cmd/convert/convert.go +++ b/pkg/kubectl/cmd/convert/convert.go @@ -107,12 +107,15 @@ func NewCmdConvert(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *co cmdutil.AddValidateFlags(cmd) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "to need to get converted.") - cmd.MarkFlagRequired("filename") return cmd } // Complete collects information required to run Convert command from command line.
func (o *ConvertOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) (err error) { + err = o.FilenameOptions.RequireFilenameOrKustomize() + if err != nil { + return err + } o.builder = f.NewBuilder o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() diff --git a/pkg/kubectl/cmd/cp/cp.go b/pkg/kubectl/cmd/cp/cp.go index 4f6c93e9a1..0c7f19d903 100644 --- a/pkg/kubectl/cmd/cp/cp.go +++ b/pkg/kubectl/cmd/cp/cp.go @@ -499,7 +499,7 @@ func (o *CopyOptions) execute(options *exec.ExecOptions) error { } options.Config = o.ClientConfig - options.PodClient = o.Clientset.Core() + options.PodClient = o.Clientset.CoreV1() if err := options.Validate(); err != nil { return err diff --git a/pkg/kubectl/cmd/create/create.go b/pkg/kubectl/cmd/create/create.go index 1a8f230bc2..f005ee37e2 100644 --- a/pkg/kubectl/cmd/create/create.go +++ b/pkg/kubectl/cmd/create/create.go @@ -103,7 +103,8 @@ func NewCmdCreate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cob Long: createLong, Example: createExample, Run: func(cmd *cobra.Command, args []string) { - if cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { + if cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) { + ioStreams.ErrOut.Write([]byte("Error: must specify one of -f and -k\n\n")) defaultRunFunc := cmdutil.DefaultSubCommandRun(ioStreams.ErrOut) defaultRunFunc(cmd, args) return @@ -119,7 +120,6 @@ func NewCmdCreate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cob usage := "to use to create the resource" cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) - cmd.MarkFlagRequired("filename") cmdutil.AddValidateFlags(cmd) cmd.Flags().BoolVar(&o.EditBeforeCreate, "edit", o.EditBeforeCreate, "Edit the API resource before creating") cmd.Flags().Bool("windows-line-endings", runtime.GOOS == "windows", @@ -184,7 +184,6 @@ func (o *CreateOptions) ValidateArgs(cmd *cobra.Command, args []string) error { // Complete completes all the required options func (o *CreateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error - o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { diff --git a/pkg/kubectl/cmd/delete/delete.go b/pkg/kubectl/cmd/delete/delete.go index eabc6c6df0..ca232353bd 100644 --- a/pkg/kubectl/cmd/delete/delete.go +++ b/pkg/kubectl/cmd/delete/delete.go @@ -71,6 +71,9 @@ var ( # Delete a pod using the type and name specified in pod.json. kubectl delete -f ./pod.json + # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml. + kubectl delete -k dir + # Delete a pod based on the type and name in the JSON passed into stdin. 
cat pod.json | kubectl delete -f - @@ -102,6 +105,7 @@ type DeleteOptions struct { DeleteNow bool ForceDeletion bool WaitForDeletion bool + Quiet bool GracePeriod int Timeout time.Duration @@ -119,7 +123,7 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra deleteFlags := NewDeleteCommandFlags("containing the resource to delete.") cmd := &cobra.Command{ - Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", + Use: "delete ([-f FILENAME] | [-k DIRECTORY] | TYPE [(NAME | -l label | --all)])", DisableFlagsInUseLine: true, Short: i18n.T("Delete resources by filenames, stdin, resources and names, or by resources and label selector"), Long: deleteLong, @@ -313,7 +317,9 @@ func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav return nil, cmdutil.AddSourceToErr("deleting", info.Source, err) } - o.PrintObj(info) + if !o.Quiet { + o.PrintObj(info) + } return deleteResponse, nil } diff --git a/pkg/kubectl/cmd/delete/delete_flags.go b/pkg/kubectl/cmd/delete/delete_flags.go index 4a65a9c1a8..0cab48b174 100644 --- a/pkg/kubectl/cmd/delete/delete_flags.go +++ b/pkg/kubectl/cmd/delete/delete_flags.go @@ -156,9 +156,11 @@ func NewDeleteCommandFlags(usage string) *DeleteFlags { filenames := []string{} recursive := false + kustomize := "" return &DeleteFlags{ - FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Recursive: &recursive}, + // Not using helpers.go since it provides function to add '-k' for FileNameOptions, but not FileNameFlags + FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Kustomize: &kustomize, Recursive: &recursive}, LabelSelector: &labelSelector, FieldSelector: &fieldSelector, @@ -186,10 +188,11 @@ func NewDeleteFlags(usage string) *DeleteFlags { wait := false filenames := []string{} + kustomize := "" recursive := false return &DeleteFlags{ - FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Recursive: &recursive}, + FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Kustomize: &kustomize, Recursive: &recursive}, Cascade: &cascade, GracePeriod: &gracePeriod, diff --git a/pkg/kubectl/cmd/describe/describe.go b/pkg/kubectl/cmd/describe/describe.go index b0f9cb1d48..bd85780902 100644 --- a/pkg/kubectl/cmd/describe/describe.go +++ b/pkg/kubectl/cmd/describe/describe.go @@ -131,7 +131,7 @@ func (o *DescribeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.EnforceNamespace = false } - if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) { return fmt.Errorf("You must specify the type of resource to describe. 
%s\n", cmdutil.SuggestAPIResources(o.CmdParent)) } diff --git a/pkg/kubectl/cmd/diff/diff.go b/pkg/kubectl/cmd/diff/diff.go index e64e8a0990..5f68198767 100644 --- a/pkg/kubectl/cmd/diff/diff.go +++ b/pkg/kubectl/cmd/diff/diff.go @@ -118,7 +118,6 @@ func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C usage := "contains the configuration to diff" cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage) cmdutil.AddServerSideApplyFlags(cmd) - cmd.MarkFlagRequired("filename") return cmd } @@ -395,6 +394,11 @@ func isConflict(err error) bool { func (o *DiffOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error + err = o.FilenameOptions.RequireFilenameOrKustomize() + if err != nil { + return err + } + o.ServerSideApply = cmdutil.GetServerSideApplyFlag(cmd) o.ForceConflicts = cmdutil.GetForceConflictsFlag(cmd) if o.ForceConflicts && !o.ServerSideApply { diff --git a/pkg/kubectl/cmd/get/get.go b/pkg/kubectl/cmd/get/get.go index 73a450d36f..b3fa41b3c5 100644 --- a/pkg/kubectl/cmd/get/get.go +++ b/pkg/kubectl/cmd/get/get.go @@ -115,6 +115,9 @@ var ( # List a pod identified by type and name specified in "pod.yaml" in JSON output format. kubectl get -f pod.yaml -o json + # List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml. + kubectl get -k dir/ + # Return only the phase value of the specified pod. kubectl get -o template pod/web-pod-13je7 --template={{.status.phase}} @@ -257,7 +260,7 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri switch { case o.Watch || o.WatchOnly: default: - if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { fmt.Fprintf(o.ErrOut, "You must specify the type of resource to get. %s\n\n", cmdutil.SuggestAPIResources(o.CmdParent)) fullCmdName := cmd.Parent().CommandPath() usageString := "Required resource not specified." 
diff --git a/pkg/kubectl/cmd/label/label.go b/pkg/kubectl/cmd/label/label.go index 63781453f5..eca1fbbbed 100644 --- a/pkg/kubectl/cmd/label/label.go +++ b/pkg/kubectl/cmd/label/label.go @@ -205,7 +205,7 @@ func (o *LabelOptions) Validate() error { if o.all && len(o.fieldSelector) > 0 { return fmt.Errorf("cannot set --all and --field-selector at the same time") } - if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { + if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) { return fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>") } if len(o.newLabels) < 1 && len(o.removeLabels) < 1 && !o.list { diff --git a/pkg/kubectl/cmd/replace/replace.go b/pkg/kubectl/cmd/replace/replace.go index dd329160e9..9c3456074b 100644 --- a/pkg/kubectl/cmd/replace/replace.go +++ b/pkg/kubectl/cmd/replace/replace.go @@ -117,7 +117,6 @@ func NewCmdReplace(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr o.DeleteFlags.AddFlags(cmd) o.RecordFlags.AddFlags(cmd) - cmd.MarkFlagRequired("filename") cmdutil.AddValidateFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd) @@ -163,6 +162,11 @@ func (o *ReplaceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] } o.DeleteOptions = deleteOpts + err = o.DeleteOptions.FilenameOptions.RequireFilenameOrKustomize() + if err != nil { + return err + } + schema, err := f.Validator(o.validate) if err != nil { return err } @@ -189,7 +193,7 @@ func (o *ReplaceOptions) Validate(cmd *cobra.Command) error { return fmt.Errorf("--timeout must have --force specified") } - if cmdutil.IsFilenameSliceEmpty(o.DeleteOptions.FilenameOptions.Filenames) { + if cmdutil.IsFilenameSliceEmpty(o.DeleteOptions.FilenameOptions.Filenames, o.DeleteOptions.FilenameOptions.Kustomize) { return cmdutil.UsageErrorf(cmd, "Must specify --filename to replace") } diff --git a/pkg/kubectl/cmd/rollout/rollout_history.go b/pkg/kubectl/cmd/rollout/rollout_history.go index d924550818..3661a62400 100644 --- a/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/pkg/kubectl/cmd/rollout/rollout_history.go @@ -123,7 +123,7 @@ func (o *RolloutHistoryOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, // Validate makes sure all the provided values for command-line options are valid func (o *RolloutHistoryOptions) Validate() error { - if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { return fmt.Errorf("required resource not specified") } if o.Revision < 0 { diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index b29963d649..56dd4e1c8c 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -117,7 +117,7 @@ func (o *PauseOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } func (o *PauseOptions) Validate() error { - if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { return fmt.Errorf("required resource not specified") } return nil diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index f1f9e4e33d..d46e9a6ec8 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -121,7 +121,7 @@ func (o *ResumeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s } func (o *ResumeOptions) 
Validate() error { - if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { return fmt.Errorf("required resource not specified") } return nil diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index fde72df1d8..38a16af914 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -148,7 +148,7 @@ func (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error // Validate makes sure all the provided values for command-line options are valid func (o *RolloutStatusOptions) Validate() error { - if len(o.BuilderArgs) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { + if len(o.BuilderArgs) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) { return fmt.Errorf("required resource not specified") } diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index c4db88af20..a06e5c5fec 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -126,7 +126,7 @@ func (o *UndoOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []str } func (o *UndoOptions) Validate() error { - if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { return fmt.Errorf("required resource not specified") } return nil diff --git a/pkg/kubectl/cmd/run/run.go b/pkg/kubectl/cmd/run/run.go index de0f0c2725..3b9f3431fc 100644 --- a/pkg/kubectl/cmd/run/run.go +++ b/pkg/kubectl/cmd/run/run.go @@ -241,6 +241,7 @@ func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { deleteOpts.IgnoreNotFound = true deleteOpts.WaitForDeletion = false deleteOpts.GracePeriod = -1 + deleteOpts.Quiet = o.Quiet o.DeleteOptions = deleteOpts diff --git a/pkg/kubectl/cmd/scale/scale.go b/pkg/kubectl/cmd/scale/scale.go index 997d37cefb..ef2d32a43d 100644 --- a/pkg/kubectl/cmd/scale/scale.go +++ b/pkg/kubectl/cmd/scale/scale.go @@ -228,7 +228,7 @@ func (o *ScaleOptions) RunScale() error { // go down the legacy jobs path. This can be removed in 3.14 For now, contain it. 
fmt.Fprintf(o.ErrOut, "%s scale job is DEPRECATED and will be removed in a future version.\n", o.parent) - if err := ScaleJob(info, o.clientSet.Batch(), uint(o.Replicas), precondition, retry, waitForReplicas); err != nil { + if err := ScaleJob(info, o.clientSet.BatchV1(), uint(o.Replicas), precondition, retry, waitForReplicas); err != nil { return err } diff --git a/pkg/kubectl/cmd/scale/scalejob_test.go b/pkg/kubectl/cmd/scale/scalejob_test.go index ec1734cafa..4ed570aca9 100644 --- a/pkg/kubectl/cmd/scale/scalejob_test.go +++ b/pkg/kubectl/cmd/scale/scalejob_test.go @@ -69,7 +69,7 @@ func (c *errorJobClient) Jobs(namespace string) batchclient.JobInterface { } func TestJobScaleRetry(t *testing.T) { - fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), conflict: true} + fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().BatchV1(), conflict: true} scaler := &JobPsuedoScaler{JobsClient: fake} preconditions := ScalePrecondition{-1, ""} count := uint(3) @@ -103,7 +103,7 @@ func job() *batch.Job { func TestJobScale(t *testing.T) { fakeClientset := fake.NewSimpleClientset(job()) - scaler := &JobPsuedoScaler{JobsClient: fakeClientset.Batch()} + scaler := &JobPsuedoScaler{JobsClient: fakeClientset.BatchV1()} preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" @@ -122,7 +122,7 @@ func TestJobScale(t *testing.T) { } func TestJobScaleInvalid(t *testing.T) { - fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), invalid: true} + fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().BatchV1(), invalid: true} scaler := &JobPsuedoScaler{JobsClient: fake} preconditions := ScalePrecondition{-1, ""} count := uint(3) @@ -150,7 +150,7 @@ func TestJobScaleFailsPreconditions(t *testing.T) { Parallelism: &ten, }, }) - scaler := &JobPsuedoScaler{JobsClient: fake.Batch()} + scaler := &JobPsuedoScaler{JobsClient: fake.BatchV1()} preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index ef422879ba..7cf1b13f36 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -198,7 +198,7 @@ func (o *SetImageOptions) Validate() error { if o.All && len(o.Selector) > 0 { errors = append(errors, fmt.Errorf("cannot set --all and --selector at the same time")) } - if len(o.Resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + if len(o.Resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { errors = append(errors, fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>")) } if len(o.ContainerImages) < 1 { diff --git a/pkg/kubectl/cmd/top/top_node.go b/pkg/kubectl/cmd/top/top_node.go index 48e7f48e0c..073b2ccdc0 100644 --- a/pkg/kubectl/cmd/top/top_node.go +++ b/pkg/kubectl/cmd/top/top_node.go @@ -219,7 +219,7 @@ func (o TopNodeOptions) RunTopNode() error { func getNodeMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, resourceName string, selector labels.Selector) (*metricsapi.NodeMetricsList, error) { var err error versionedMetrics := &metricsV1beta1api.NodeMetricsList{} - mc := metricsClient.Metrics() + mc := metricsClient.MetricsV1beta1() nm := mc.NodeMetricses() if resourceName != "" { m, err := nm.Get(resourceName, metav1.GetOptions{}) diff --git a/pkg/kubectl/cmd/top/top_pod.go b/pkg/kubectl/cmd/top/top_pod.go index 1ba4c72216..63938cab37 100644 --- a/pkg/kubectl/cmd/top/top_pod.go +++ b/pkg/kubectl/cmd/top/top_pod.go @@ -205,13 +205,13 @@ func 
getMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, namespac } versionedMetrics := &metricsv1beta1api.PodMetricsList{} if resourceName != "" { - m, err := metricsClient.Metrics().PodMetricses(ns).Get(resourceName, metav1.GetOptions{}) + m, err := metricsClient.MetricsV1beta1().PodMetricses(ns).Get(resourceName, metav1.GetOptions{}) if err != nil { return nil, err } versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m} } else { - versionedMetrics, err = metricsClient.Metrics().PodMetricses(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) + versionedMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index 0c6bf585a9..e024d4e212 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -291,8 +291,8 @@ func UsageErrorf(cmd *cobra.Command, format string, args ...interface{}) error { return fmt.Errorf("%s\nSee '%s -h' for help and examples", msg, cmd.CommandPath()) } -func IsFilenameSliceEmpty(filenames []string) bool { - return len(filenames) == 0 +func IsFilenameSliceEmpty(filenames []string, directory string) bool { + return len(filenames) == 0 && directory == "" } func GetFlagString(cmd *cobra.Command, flag string) string { @@ -382,6 +382,7 @@ func AddValidateOptionFlags(cmd *cobra.Command, options *ValidateOptions) { func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) { AddJsonFilenameFlag(cmd.Flags(), &options.Filenames, "Filename, directory, or URL to files "+usage) + AddKustomizeFlag(cmd.Flags(), &options.Kustomize) cmd.Flags().BoolVarP(&options.Recursive, "recursive", "R", options.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") } @@ -394,6 +395,11 @@ func AddJsonFilenameFlag(flags *pflag.FlagSet, value *[]string, usage string) { flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) } +// AddKustomizeFlag adds kustomize flag to a command +func AddKustomizeFlag(flags *pflag.FlagSet, value *string) { + flags.StringVarP(value, "kustomize", "k", *value, "Process the kustomization directory. This flag can't be used together with -f or -R.") +} + // AddDryRunFlag adds dry-run flag to a command. Usually used by mutations. 
func AddDryRunFlag(cmd *cobra.Command) { cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") diff --git a/pkg/kubectl/describe/versioned/describe.go b/pkg/kubectl/describe/versioned/describe.go index f3ca513183..ab69bdf699 100644 --- a/pkg/kubectl/describe/versioned/describe.go +++ b/pkg/kubectl/describe/versioned/describe.go @@ -222,7 +222,7 @@ func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) ( if err != nil { return nil, false } - eventsClient := clientSet.Core() + eventsClient := clientSet.CoreV1() return &genericDescriber{mapping, dynamicClient, eventsClient}, true } @@ -349,11 +349,11 @@ type NamespaceDescriber struct { } func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ns, err := d.Core().Namespaces().Get(name, metav1.GetOptions{}) + ns, err := d.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) if err != nil { return "", err } - resourceQuotaList, err := d.Core().ResourceQuotas(name).List(metav1.ListOptions{}) + resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support resource quotas. @@ -363,7 +363,7 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings return "", err } } - limitRangeList, err := d.Core().LimitRanges(name).List(metav1.ListOptions{}) + limitRangeList, err := d.CoreV1().LimitRanges(name).List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support limit ranges. @@ -522,7 +522,7 @@ type LimitRangeDescriber struct { } func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - lr := d.Core().LimitRanges(namespace) + lr := d.CoreV1().LimitRanges(namespace) limitRange, err := lr.Get(name, metav1.GetOptions{}) if err != nil { @@ -549,7 +549,7 @@ type ResourceQuotaDescriber struct { } func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rq := d.Core().ResourceQuotas(namespace) + rq := d.CoreV1().ResourceQuotas(namespace) resourceQuota, err := rq.Get(name, metav1.GetOptions{}) if err != nil { @@ -619,10 +619,10 @@ type PodDescriber struct { } func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pod, err := d.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + pod, err := d.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { if describerSettings.ShowEvents { - eventsInterface := d.Core().Events(namespace) + eventsInterface := d.CoreV1().Events(namespace) selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) options := metav1.ListOptions{FieldSelector: selector.String()} events, err2 := eventsInterface.List(options) @@ -647,7 +647,7 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings descri if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod { ref.UID = types.UID(pod.Annotations[corev1.MirrorPodAnnotationKey]) } - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ref) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ref) } } @@ -1263,7 +1263,7 @@ type PersistentVolumeDescriber struct { } func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, 
error) { - c := d.Core().PersistentVolumes() + c := d.CoreV1().PersistentVolumes() pv, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -1272,7 +1272,7 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSe var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, pv) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pv) } return describePersistentVolume(pv, events) @@ -1411,21 +1411,21 @@ type PersistentVolumeClaimDescriber struct { } func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().PersistentVolumeClaims(namespace) + c := d.CoreV1().PersistentVolumeClaims(namespace) pvc, err := c.Get(name, metav1.GetOptions{}) if err != nil { return "", err } - pc := d.Core().Pods(namespace) + pc := d.CoreV1().Pods(namespace) mountPods, err := getMountPods(pc, pvc.Name) if err != nil { return "", err } - events, _ := d.Core().Events(namespace).Search(scheme.Scheme, pvc) + events, _ := d.CoreV1().Events(namespace).Search(scheme.Scheme, pvc) return describePersistentVolumeClaim(pvc, events, mountPods) } @@ -1872,8 +1872,8 @@ type ReplicationControllerDescriber struct { } func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rc := d.Core().ReplicationControllers(namespace) - pc := d.Core().Pods(namespace) + rc := d.CoreV1().ReplicationControllers(namespace) + pc := d.CoreV1().Pods(namespace) controller, err := rc.Get(name, metav1.GetOptions{}) if err != nil { @@ -1887,7 +1887,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, controller) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, controller) } return describeReplicationController(controller, events, running, waiting, succeeded, failed) @@ -1944,8 +1944,8 @@ type ReplicaSetDescriber struct { } func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rsc := d.Apps().ReplicaSets(namespace) - pc := d.Core().Pods(namespace) + rsc := d.AppsV1().ReplicaSets(namespace) + pc := d.CoreV1().Pods(namespace) rs, err := rsc.Get(name, metav1.GetOptions{}) if err != nil { @@ -1961,7 +1961,7 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, rs) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, rs) } return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) @@ -2006,14 +2006,14 @@ type JobDescriber struct { } func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - job, err := d.Batch().Jobs(namespace).Get(name, metav1.GetOptions{}) + job, err := d.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, job) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, job) } return describeJob(job, events) @@ -2160,8 +2160,8 @@ type DaemonSetDescriber struct { } func (d *DaemonSetDescriber) 
Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - dc := d.Apps().DaemonSets(namespace) - pc := d.Core().Pods(namespace) + dc := d.AppsV1().DaemonSets(namespace) + pc := d.CoreV1().Pods(namespace) daemon, err := dc.Get(name, metav1.GetOptions{}) if err != nil { @@ -2179,7 +2179,7 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, daemon) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, daemon) } return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) @@ -2218,7 +2218,7 @@ type SecretDescriber struct { } func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().Secrets(namespace) + c := d.CoreV1().Secrets(namespace) secret, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2258,7 +2258,7 @@ type IngressDescriber struct { } func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := i.Extensions().Ingresses(namespace) + c := i.ExtensionsV1beta1().Ingresses(namespace) ing, err := c.Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2267,8 +2267,8 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings de } func (i *IngressDescriber) describeBackend(ns string, backend *extensionsv1beta1.IngressBackend) string { - endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) - service, _ := i.Core().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) + endpoints, _ := i.CoreV1().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) + service, _ := i.CoreV1().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) spName := "" for i := range service.Spec.Ports { sp := &service.Spec.Ports[i] @@ -2330,7 +2330,7 @@ func (i *IngressDescriber) describeIngress(ing *extensionsv1beta1.Ingress, descr describeIngressAnnotations(w, ing.Annotations) if describerSettings.ShowEvents { - events, _ := i.Core().Events(ing.Namespace).Search(scheme.Scheme, ing) + events, _ := i.CoreV1().Events(ing.Namespace).Search(scheme.Scheme, ing) if events != nil { DescribeEvents(events, w) } @@ -2366,17 +2366,17 @@ type ServiceDescriber struct { } func (d *ServiceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().Services(namespace) + c := d.CoreV1().Services(namespace) service, err := c.Get(name, metav1.GetOptions{}) if err != nil { return "", err } - endpoints, _ := d.Core().Endpoints(namespace).Get(name, metav1.GetOptions{}) + endpoints, _ := d.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{}) var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, service) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, service) } return describeService(service, endpoints, events) } @@ -2464,7 +2464,7 @@ type EndpointsDescriber struct { } func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().Endpoints(namespace) + c := d.CoreV1().Endpoints(namespace) ep, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2473,7 +2473,7 @@ func (d *EndpointsDescriber) Describe(namespace, name string, 
describerSettings var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ep) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ep) } return describeEndpoints(ep, events) @@ -2539,7 +2539,7 @@ type ServiceAccountDescriber struct { } func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().ServiceAccounts(namespace) + c := d.CoreV1().ServiceAccounts(namespace) serviceAccount, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2551,7 +2551,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett // missingSecrets is the set of all secrets present in the // serviceAccount but not present in the set of existing secrets. missingSecrets := sets.NewString() - secrets, err := d.Core().Secrets(namespace).List(metav1.ListOptions{}) + secrets, err := d.CoreV1().Secrets(namespace).List(metav1.ListOptions{}) // errors are tolerated here in order to describe the serviceAccount with all // of the secrets that it references, even if those secrets cannot be fetched. @@ -2585,7 +2585,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, serviceAccount) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, serviceAccount) } return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) @@ -2656,7 +2656,7 @@ type RoleDescriber struct { } func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.Rbac().Roles(namespace).Get(name, metav1.GetOptions{}) + role, err := d.RbacV1().Roles(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2695,7 +2695,7 @@ type ClusterRoleDescriber struct { } func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.Rbac().ClusterRoles().Get(name, metav1.GetOptions{}) + role, err := d.RbacV1().ClusterRoles().Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2751,7 +2751,7 @@ type RoleBindingDescriber struct { } func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.Rbac().RoleBindings(namespace).Get(name, metav1.GetOptions{}) + binding, err := d.RbacV1().RoleBindings(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2783,7 +2783,7 @@ type ClusterRoleBindingDescriber struct { } func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.Rbac().ClusterRoleBindings().Get(name, metav1.GetOptions{}) + binding, err := d.RbacV1().ClusterRoleBindings().Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2815,7 +2815,7 @@ type NodeDescriber struct { } func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - mc := d.Core().Nodes() + mc := d.CoreV1().Nodes() node, err := mc.Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2828,7 +2828,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings descr // in a policy aware setting, users may have access to a node, but 
not all pods // in that case, we note that the user does not have access to the pods canViewPods := true - nodeNonTerminatedPodsList, err := d.Core().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) + nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) if err != nil { if !errors.IsForbidden(err) { return "", err @@ -2843,7 +2843,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings descr } else { // TODO: We haven't decided the namespace for Node object yet. ref.UID = types.UID(ref.Name) - events, _ = d.Core().Events("").Search(scheme.Scheme, ref) + events, _ = d.CoreV1().Events("").Search(scheme.Scheme, ref) } } @@ -2939,11 +2939,11 @@ type StatefulSetDescriber struct { } func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ps, err := p.client.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{}) + ps, err := p.client.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - pc := p.client.Core().Pods(namespace) + pc := p.client.CoreV1().Pods(namespace) selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector) if err != nil { @@ -2957,7 +2957,7 @@ func (p *StatefulSetDescriber) Describe(namespace, name string, describerSetting var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, ps) + events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, ps) } return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed) @@ -2997,7 +2997,7 @@ type CertificateSigningRequestDescriber struct { } func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - csr, err := p.client.Certificates().CertificateSigningRequests().Get(name, metav1.GetOptions{}) + csr, err := p.client.CertificatesV1beta1().CertificateSigningRequests().Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3013,7 +3013,7 @@ func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, de var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, csr) + events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, csr) } return describeCertificateSigningRequest(csr, cr, status, events) @@ -3081,7 +3081,7 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.client.Core().Events(namespace).Search(scheme.Scheme, hpa) + events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpa) } return describeHorizontalPodAutoscaler(hpa, events, d) @@ -3411,7 +3411,7 @@ type ConfigMapDescriber struct { } func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().ConfigMaps(namespace) + c := d.CoreV1().ConfigMaps(namespace) configMap, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -3431,7 +3431,7 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings w.Write(LEVEL_0, "%s\n", string(v)) } if describerSettings.ShowEvents { - events, err := d.Core().Events(namespace).Search(scheme.Scheme, configMap) + events, err := 
d.CoreV1().Events(namespace).Search(scheme.Scheme, configMap) if err != nil { return err } @@ -3449,7 +3449,7 @@ type NetworkPolicyDescriber struct { } func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Networking().NetworkPolicies(namespace) + c := d.NetworkingV1().NetworkPolicies(namespace) networkPolicy, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -3580,14 +3580,14 @@ type StorageClassDescriber struct { } func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - sc, err := s.Storage().StorageClasses().Get(name, metav1.GetOptions{}) + sc, err := s.StorageV1().StorageClasses().Get(name, metav1.GetOptions{}) if err != nil { return "", err } var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(scheme.Scheme, sc) + events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, sc) } return describeStorageClass(sc, events) @@ -3664,14 +3664,14 @@ type PodDisruptionBudgetDescriber struct { } func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) + pdb, err := p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.Core().Events(namespace).Search(scheme.Scheme, pdb) + events, _ = p.CoreV1().Events(namespace).Search(scheme.Scheme, pdb) } return describePodDisruptionBudget(pdb, events) @@ -3720,7 +3720,7 @@ func (s *PriorityClassDescriber) Describe(namespace, name string, describerSetti var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(scheme.Scheme, pc) + events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, pc) } return describePriorityClass(pc, events) @@ -3749,7 +3749,7 @@ type PodSecurityPolicyDescriber struct { } func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - psp, err := d.Policy().PodSecurityPolicies().Get(name, metav1.GetOptions{}) + psp, err := d.PolicyV1beta1().PodSecurityPolicies().Get(name, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/pkg/kubectl/drain/cordon.go b/pkg/kubectl/drain/cordon.go index e3eb77fdab..fc33975266 100644 --- a/pkg/kubectl/drain/cordon.go +++ b/pkg/kubectl/drain/cordon.go @@ -73,7 +73,7 @@ func (c *CordonHelper) UpdateIfRequired(desired bool) bool { // JSON, or if either patch or update calls fail; it will also return a second error // whenever creating a patch has failed func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, error) { - client := clientset.Core().Nodes() + client := clientset.CoreV1().Nodes() oldData, err := json.Marshal(c.node) if err != nil { diff --git a/pkg/kubectl/generated/bindata.go b/pkg/kubectl/generated/bindata.go index 085a8912cd..63c56b5920 100644 --- a/pkg/kubectl/generated/bindata.go +++ b/pkg/kubectl/generated/bindata.go @@ -13902,7 +13902,7 @@ func translationsKubectlIt_itLc_messagesK8sPo() (*asset, error) { return a, nil } -var _translationsKubectlJa_jpLc_messagesK8sMo = 
[]byte("\xde\x12\x04\x95\x00\x00\x00\x00\x11\x00\x00\x00\x1c\x00\x00\x00\xa4\x00\x00\x00\x17\x00\x00\x00,\x01\x00\x00\x00\x00\x00\x00\x88\x01\x00\x008\x00\x00\x00\x89\x01\x00\x000\x00\x00\x00\xc2\x01\x00\x000\x00\x00\x00\xf3\x01\x00\x00\x1d\x00\x00\x00$\x02\x00\x00*\x00\x00\x00B\x02\x00\x00A\x00\x00\x00m\x02\x00\x00\x1c\x00\x00\x00\xaf\x02\x00\x00\x17\x00\x00\x00\xcc\x02\x00\x00\"\x00\x00\x00\xe4\x02\x00\x00\"\x00\x00\x00\a\x03\x00\x00\x1f\x00\x00\x00*\x03\x00\x00-\x00\x00\x00J\x03\x00\x00-\x00\x00\x00x\x03\x00\x00/\x00\x00\x00\xa6\x03\x00\x00$\x00\x00\x00\xd6\x03\x00\x00\xc5\x00\x00\x00\xfb\x03\x00\x00\xa6\x01\x00\x00\xc1\x04\x00\x00c\x00\x00\x00h\x06\x00\x00:\x00\x00\x00\xcc\x06\x00\x00=\x00\x00\x00\a\a\x00\x007\x00\x00\x00E\a\x00\x00:\x00\x00\x00}\a\x00\x00b\x00\x00\x00\xb8\a\x00\x00-\x00\x00\x00\x1b\b\x00\x00%\x00\x00\x00I\b\x00\x007\x00\x00\x00o\b\x00\x00:\x00\x00\x00\xa7\b\x00\x004\x00\x00\x00\xe2\b\x00\x00:\x00\x00\x00\x17\t\x00\x00:\x00\x00\x00R\t\x00\x00:\x00\x00\x00\x8d\t\x00\x003\x00\x00\x00\xc8\t\x00\x00\x1d\x01\x00\x00\xfc\t\x00\x00\x01\x00\x00\x00\n\x00\x00\x00\v\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\t\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\a\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\b\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\f\x00\x00\x00\x05\x00\x00\x00\r\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00Apply a configuration to a resource by filename or stdin\x00Delete the specified cluster from the kubeconfig\x00Delete the specified context from the kubeconfig\x00Describe one or many contexts\x00Display clusters defined in the kubeconfig\x00Display merged kubeconfig settings or a specified kubeconfig file\x00Displays the current-context\x00Modify kubeconfig files\x00Sets a cluster entry in kubeconfig\x00Sets a context entry in kubeconfig\x00Sets a user entry in kubeconfig\x00Sets an individual value in a kubeconfig file\x00Sets the current-context in a kubeconfig file\x00Unsets an individual value in a kubeconfig file\x00Update the annotations on a resource\x00watch is only supported on individual resources and resource collections - %d resources were found\x00watch is only supported on individual resources and resource collections - %d resources were found\x00Project-Id-Version: gettext-go-examples-hello\nReport-Msgid-Bugs-To: \nPOT-Creation-Date: 2013-12-12 20:03+0000\nPO-Revision-Date: 2017-01-29 22:54-0800\nLast-Translator: Giri Kuncoro \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.6.10\nX-Poedit-SourceCharset: UTF-8\nLanguage-Team: \nPlural-Forms: nplurals=2; plural=(n > 1);\nLanguage: 
ja\n\x00\u30d5\u30a1\u30a4\u30eb\u540d\u3092\u6307\u5b9a\u307e\u305f\u306f\u6a19\u6e96\u5165\u529b\u7d4c\u7531\u3067\u30ea\u30bd\u30fc\u30b9\u306b\u30b3\u30f3\u30d5\u30a3\u30b0\u3092\u9069\u7528\u3059\u308b\x00kubeconfig\u304b\u3089\u6307\u5b9a\u3057\u305f\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u524a\u9664\u3059\u308b\x00kubeconfig\u304b\u3089\u6307\u5b9a\u3057\u305f\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u524a\u9664\u3059\u308b\x001\u3064\u307e\u305f\u306f\u8907\u6570\u306e\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u8a18\u8ff0\u3059\u308b\x00kubeconfig\u3067\u5b9a\u7fa9\u3055\u308c\u305f\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u8868\u793a\u3059\u308b\x00\u30de\u30fc\u30b8\u3055\u308c\u305fkubeconfig\u306e\u8a2d\u5b9a\u307e\u305f\u306f\u6307\u5b9a\u3055\u308c\u305fkubeconfig\u30d5\u30a1\u30a4\u30eb\u3092\u8868\u793a\u3059\u308b\x00\u30ab\u30ec\u30f3\u30c8\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u8868\u793a\u3059\u308b\x00kubeconfig\u30d5\u30a1\u30a4\u30eb\u3092\u5909\u66f4\u3059\u308b\x00kubeconfig\u306b\u30af\u30e9\u30b9\u30bf\u30fc\u30a8\u30f3\u30c8\u30ea\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u306b\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u30a8\u30f3\u30c8\u30ea\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u306b\u30e6\u30fc\u30b6\u30fc\u30a8\u30f3\u30c8\u30ea\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u30d5\u30a1\u30a4\u30eb\u5185\u306e\u5909\u6570\u3092\u500b\u5225\u306b\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u306b\u30ab\u30ec\u30f3\u30c8\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u30d5\u30a1\u30a4\u30eb\u304b\u3089\u5909\u6570\u3092\u500b\u5225\u306b\u524a\u9664\u3059\u308b\x00\u30ea\u30bd\u30fc\u30b9\u306e\u30a2\u30ce\u30c6\u30fc\u30b7\u30e7\u30f3\u3092\u66f4\u65b0\u3059\u308b\x00watch\u306f\u5358\u4e00\u30ea\u30bd\u30fc\u30b9\u53ca\u3073\u30ea\u30bd\u30fc\u30b9\u30b3\u30ec\u30af\u30b7\u30e7\u30f3\u306e\u307f\u30b5\u30dd\u30fc\u30c8\u3057\u3066\u3044\u307e\u3059 - %d\u500b\u306e\u30ea\u30bd\u30fc\u30b9\u304c\u898b\u3064\u304b\u308a\u307e\u3057\u305f\x00watch\u306f\u5358\u4e00\u30ea\u30bd\u30fc\u30b9\u53ca\u3073\u30ea\u30bd\u30fc\u30b9\u30b3\u30ec\u30af\u30b7\u30e7\u30f3\u306e\u307f\u30b5\u30dd\u30fc\u30c8\u3057\u3066\u3044\u307e\u3059 - %d\u500b\u306e\u30ea\u30bd\u30fc\u30b9\u304c\u898b\u3064\u304b\u308a\u307e\u3057\u305f\x00") +var _translationsKubectlJa_jpLc_messagesK8sMo = 
[]byte("\xde\x12\x04\x95\x00\x00\x00\x00\xeb\x00\x00\x00\x1c\x00\x00\x00t\a\x00\x009\x01\x00\x00\xcc\x0e\x00\x00\x00\x00\x00\x00\xb0\x13\x00\x00\xdc\x00\x00\x00\xb1\x13\x00\x00\xb6\x00\x00\x00\x8e\x14\x00\x00\v\x02\x00\x00E\x15\x00\x00\x1f\x01\x00\x00Q\x17\x00\x00z\x00\x00\x00q\x18\x00\x00_\x02\x00\x00\xec\x18\x00\x00\u007f\x01\x00\x00L\x1b\x00\x00\x8f\x01\x00\x00\xcc\x1c\x00\x00k\x01\x00\x00\\\x1e\x00\x00k\x01\x00\x00\xc8\x1f\x00\x00>\x01\x00\x004!\x00\x00\x03\x02\x00\x00s\"\x00\x00o\x01\x00\x00w$\x00\x00H\x05\x00\x00\xe7%\x00\x00g\x02\x00\x000+\x00\x00\x1b\x02\x00\x00\x98-\x00\x00q\x01\x00\x00\xb4/\x00\x00\xa8\x01\x00\x00&1\x00\x00\xd4\x01\x00\x00\xcf2\x00\x00\x02\x02\x00\x00\xa44\x00\x00\xb4\x00\x00\x00\xa76\x00\x00\xb7\x02\x00\x00\\7\x00\x00\x92\x03\x00\x00\x14:\x00\x00\xbf\x01\x00\x00\xa7=\x00\x00=\x00\x00\x00g?\x00\x00;\x00\x00\x00\xa5?\x00\x00\xcd\x02\x00\x00\xe1?\x00\x00<\x00\x00\x00\xafB\x00\x00P\x00\x00\x00\xecB\x00\x00S\x00\x00\x00=C\x00\x00<\x00\x00\x00\x91C\x00\x00\xac\x01\x00\x00\xceC\x00\x00\x13\x03\x00\x00{E\x00\x00\xea\x01\x00\x00\x8fH\x00\x00\xfa\x01\x00\x00zJ\x00\x00\xda\x01\x00\x00uL\x00\x00c\x01\x00\x00PN\x00\x00T\x01\x00\x00\xb4O\x00\x00\xba\x06\x00\x00\tQ\x00\x00\xf9\x01\x00\x00\xc4W\x00\x00\xe0\x02\x00\x00\xbeY\x00\x00\x02\x03\x00\x00\x9f\\\x00\x00\xfb\x00\x00\x00\xa2_\x00\x00\xa5\x01\x00\x00\x9e`\x00\x00\xb4\x01\x00\x00Db\x00\x00\x18\x00\x00\x00\xf9c\x00\x00<\x00\x00\x00\x12d\x00\x00=\x00\x00\x00Od\x00\x00\xc6\x00\x00\x00\x8dd\x00\x00g\x02\x00\x00Te\x00\x00.\x00\x00\x00\xbcg\x00\x001\x03\x00\x00\xebg\x00\x00g\x00\x00\x00\x1dk\x00\x00Q\x00\x00\x00\x85k\x00\x00R\x00\x00\x00\xd7k\x00\x00\"\x00\x00\x00*l\x00\x00X\x02\x00\x00Ml\x00\x004\x00\x00\x00\xa6n\x00\x00}\x00\x00\x00\xdbn\x00\x00k\x01\x00\x00Yo\x00\x00\x81\a\x00\x00\xc5p\x00\x00f\x01\x00\x00Gx\x00\x00\x85\x00\x00\x00\xaey\x00\x00\xea\x00\x00\x004z\x00\x00\xd9\x00\x00\x00\x1f{\x00\x00\n\x05\x00\x00\xf9{\x00\x00\x10\x05\x00\x00\x04\x81\x00\x00\x1c\x00\x00\x00\x15\x86\x00\x00\x1e\x00\x00\x002\x86\x00\x00\x98\x02\x00\x00Q\x86\x00\x00\xbc\x01\x00\x00\xea\x88\x00\x00\x9c\x01\x00\x00\xa7\x8a\x00\x00q\x01\x00\x00D\x8c\x00\x00\x05\x01\x00\x00\xb6\x8d\x00\x00\xdf\x01\x00\x00\xbc\x8e\x00\x00\x1c\x01\x00\x00\x9c\x90\x00\x00\xc1\x01\x00\x00\xb9\x91\x00\x00\x1b\x02\x00\x00{\x93\x00\x00\xc0\x00\x00\x00\x97\x95\x00\x00\xd5\x02\x00\x00X\x96\x00\x00\x9d\x00\x00\x00.\x99\x00\x00X\x00\x00\x00\u0319\x00\x00%\x02\x00\x00%\x9a\x00\x00o\x00\x00\x00K\x9c\x00\x00u\x00\x00\x00\xbb\x9c\x00\x00\x01\x01\x00\x001\x9d\x00\x00v\x00\x00\x003\x9e\x00\x00t\x00\x00\x00\xaa\x9e\x00\x00\xef\x00\x00\x00\x1f\x9f\x00\x00}\x00\x00\x00\x0f\xa0\x00\x00j\x00\x00\x00\x8d\xa0\x00\x00\xc4\x01\x00\x00\xf8\xa0\x00\x00\xf7\x03\x00\x00\xbd\xa2\x00\x00;\x00\x00\x00\xb5\xa6\x00\x008\x00\x00\x00\xf1\xa6\x00\x001\x00\x00\x00*\xa7\x00\x007\x00\x00\x00\\\xa7\x00\x00u\x02\x00\x00\x94\xa7\x00\x00\xb0\x00\x00\x00\n\xaa\x00\x00[\x00\x00\x00\xbb\xaa\x00\x00J\x00\x00\x00\x17\xab\x00\x00a\x00\x00\x00b\xab\x00\x00\xbd\x00\x00\x00\u012b\x00\x009\x00\x00\x00\x82\xac\x00\x00\xc5\x00\x00\x00\xbc\xac\x00\x00\xae\x00\x00\x00\x82\xad\x00\x00\xd6\x00\x00\x001\xae\x00\x008\x00\x00\x00\b\xaf\x00\x00%\x00\x00\x00A\xaf\x00\x00W\x00\x00\x00g\xaf\x00\x00\x1d\x00\x00\x00\xbf\xaf\x00\x00=\x00\x00\x00\u076f\x00\x00u\x00\x00\x00\x1b\xb0\x00\x004\x00\x00\x00\x91\xb0\x00\x00-\x00\x00\x00\u01b0\x00\x00\xa3\x00\x00\x00\xf4\xb0\x00\x003\x00\x00\x00\x98\xb1\x00\x002\x00\x00\x00\u0331\x00\x008\x00\x00\x00\xff\xb1\x00\x00\x1e\x00\x00\x008\xb2\x00\x00\x1a\x00\x00\x00W\xb2\x00\x009\x00\x00\x00r\xb2\x00\x00\
x13\x00\x00\x00\xac\xb2\x00\x00\x1b\x00\x00\x00\xc0\xb2\x00\x00@\x00\x00\x00\u0732\x00\x00,\x00\x00\x00\x1d\xb3\x00\x00*\x00\x00\x00J\xb3\x00\x007\x00\x00\x00u\xb3\x00\x00'\x00\x00\x00\xad\xb3\x00\x00&\x00\x00\x00\u0573\x00\x00.\x00\x00\x00\xfc\xb3\x00\x00=\x00\x00\x00+\xb4\x00\x00*\x00\x00\x00i\xb4\x00\x000\x00\x00\x00\x94\xb4\x00\x00,\x00\x00\x00\u0174\x00\x00\x1f\x00\x00\x00\xf2\xb4\x00\x00]\x00\x00\x00\x12\xb5\x00\x000\x00\x00\x00p\xb5\x00\x000\x00\x00\x00\xa1\xb5\x00\x00\"\x00\x00\x00\u04b5\x00\x00?\x00\x00\x00\xf5\xb5\x00\x00\x1d\x00\x00\x005\xb6\x00\x00,\x00\x00\x00S\xb6\x00\x00+\x00\x00\x00\x80\xb6\x00\x00$\x00\x00\x00\xac\xb6\x00\x00\x14\x00\x00\x00\u0476\x00\x00*\x00\x00\x00\xe6\xb6\x00\x00A\x00\x00\x00\x11\xb7\x00\x00\x1d\x00\x00\x00S\xb7\x00\x00\x1c\x00\x00\x00q\xb7\x00\x00\x1a\x00\x00\x00\x8e\xb7\x00\x00)\x00\x00\x00\xa9\xb7\x00\x006\x00\x00\x00\u04f7\x00\x00\x1d\x00\x00\x00\n\xb8\x00\x00\x19\x00\x00\x00(\xb8\x00\x00 \x00\x00\x00B\xb8\x00\x00v\x00\x00\x00c\xb8\x00\x00(\x00\x00\x00\u06b8\x00\x00\x16\x00\x00\x00\x03\xb9\x00\x00p\x00\x00\x00\x1a\xb9\x00\x00`\x00\x00\x00\x8b\xb9\x00\x00\x9b\x00\x00\x00\xec\xb9\x00\x00\x97\x00\x00\x00\x88\xba\x00\x00\xa8\x00\x00\x00 \xbb\x00\x00\x1b\x00\x00\x00\u027b\x00\x00\x18\x00\x00\x00\xe5\xbb\x00\x00\x1a\x00\x00\x00\xfe\xbb\x00\x00$\x00\x00\x00\x19\xbc\x00\x00\x1d\x00\x00\x00>\xbc\x00\x00\x17\x00\x00\x00\\\xbc\x00\x00a\x00\x00\x00t\xbc\x00\x00s\x00\x00\x00\u05bc\x00\x00B\x00\x00\x00J\xbd\x00\x00Y\x00\x00\x00\x8d\xbd\x00\x00+\x00\x00\x00\xe7\xbd\x00\x00+\x00\x00\x00\x13\xbe\x00\x006\x00\x00\x00?\xbe\x00\x00;\x00\x00\x00v\xbe\x00\x00q\x00\x00\x00\xb2\xbe\x00\x00/\x00\x00\x00$\xbf\x00\x001\x00\x00\x00T\xbf\x00\x00'\x00\x00\x00\x86\xbf\x00\x00'\x00\x00\x00\xae\xbf\x00\x00\x18\x00\x00\x00\u05bf\x00\x00&\x00\x00\x00\xef\xbf\x00\x00%\x00\x00\x00\x16\xc0\x00\x00(\x00\x00\x00<\xc0\x00\x00#\x00\x00\x00e\xc0\x00\x00K\x00\x00\x00\x89\xc0\x00\x00 \x00\x00\x00\xd5\xc0\x00\x00_\x00\x00\x00\xf6\xc0\x00\x00\x1e\x00\x00\x00V\xc1\x00\x00\"\x00\x00\x00u\xc1\x00\x00\"\x00\x00\x00\x98\xc1\x00\x00\x1f\x00\x00\x00\xbb\xc1\x00\x00-\x00\x00\x00\xdb\xc1\x00\x00-\x00\x00\x00\t\xc2\x00\x009\x00\x00\x007\xc2\x00\x00\x1e\x00\x00\x00q\xc2\x00\x00\x19\x00\x00\x00\x90\xc2\x00\x00c\x00\x00\x00\xaa\xc2\x00\x00#\x00\x00\x00\x0e\xc3\x00\x00\x82\x00\x00\x002\xc3\x00\x00\x94\x00\x00\x00\xb5\xc3\x00\x00H\x00\x00\x00J\xc4\x00\x00&\x00\x00\x00\x93\xc4\x00\x00e\x00\x00\x00\xba\xc4\x00\x00z\x00\x00\x00 
\xc5\x00\x00J\x00\x00\x00\x9b\xc5\x00\x00\xe5\x00\x00\x00\xe6\xc5\x00\x00W\x00\x00\x00\xcc\xc6\x00\x00E\x00\x00\x00$\xc7\x00\x00a\x00\x00\x00j\xc7\x00\x00v\x00\x00\x00\xcc\xc7\x00\x00\xcb\x00\x00\x00C\xc8\x00\x00\xcf\x00\x00\x00\x0f\xc9\x00\x00\x1e\x01\x00\x00\xdf\xc9\x00\x00\x1c\x00\x00\x00\xfe\xca\x00\x00T\x00\x00\x00\x1b\xcb\x00\x00\x17\x00\x00\x00p\xcb\x00\x00/\x00\x00\x00\x88\xcb\x00\x009\x00\x00\x00\xb8\xcb\x00\x00\x1e\x00\x00\x00\xf2\xcb\x00\x00=\x00\x00\x00\x11\xcc\x00\x00$\x00\x00\x00O\xcc\x00\x00\x1f\x00\x00\x00t\xcc\x00\x00&\x00\x00\x00\x94\xcc\x00\x00+\x00\x00\x00\xbb\xcc\x00\x00G\x00\x00\x00\xe7\xcc\x00\x00\x14\x00\x00\x00/\xcd\x00\x00r\x00\x00\x00D\xcd\x00\x00\x13\x00\x00\x00\xb7\xcd\x00\x00\x18\x00\x00\x00\xcb\xcd\x00\x00/\x00\x00\x00\xe4\xcd\x00\x00\xaa\x01\x00\x00\x14\xce\x00\x00\xe0\x00\x00\x00\xbf\xcf\x00\x00\xba\x00\x00\x00\xa0\xd0\x00\x00\x10\x02\x00\x00[\xd1\x00\x00%\x01\x00\x00l\xd3\x00\x00z\x00\x00\x00\x92\xd4\x00\x00d\x02\x00\x00\r\xd5\x00\x00\x8b\x01\x00\x00r\xd7\x00\x00\x98\x01\x00\x00\xfe\xd8\x00\x00\x84\x01\x00\x00\x97\xda\x00\x00\x88\x01\x00\x00\x1c\xdc\x00\x00A\x01\x00\x00\xa5\xdd\x00\x00\a\x02\x00\x00\xe7\xde\x00\x00q\x01\x00\x00\xef\xe0\x00\x00\\\x05\x00\x00a\xe2\x00\x00s\x02\x00\x00\xbe\xe7\x00\x00#\x02\x00\x002\xea\x00\x00w\x01\x00\x00V\xec\x00\x00\xb9\x01\x00\x00\xce\xed\x00\x00\xe5\x01\x00\x00\x88\xef\x00\x00\n\x02\x00\x00n\xf1\x00\x00\xb4\x00\x00\x00y\xf3\x00\x00\xbf\x02\x00\x00.\xf4\x00\x00\x97\x03\x00\x00\xee\xf6\x00\x00\xbf\x01\x00\x00\x86\xfa\x00\x00A\x00\x00\x00F\xfc\x00\x00?\x00\x00\x00\x88\xfc\x00\x00,\x03\x00\x00\xc8\xfc\x00\x00<\x00\x00\x00\xf5\xff\x00\x00P\x00\x00\x002\x00\x01\x00S\x00\x00\x00\x83\x00\x01\x00<\x00\x00\x00\xd7\x00\x01\x00\xb3\x01\x00\x00\x14\x01\x01\x00\x15\x03\x00\x00\xc8\x02\x01\x00\xee\x01\x00\x00\xde\x05\x01\x00\x0e\x02\x00\x00\xcd\a\x01\x00\xdb\x01\x00\x00\xdc\t\x01\x00e\x01\x00\x00\xb8\v\x01\x00T\x01\x00\x00\x1e\r\x01\x00\xd4\x06\x00\x00s\x0e\x01\x00\x1d\x02\x00\x00H\x15\x01\x00\f\x03\x00\x00f\x17\x01\x00\x05\x03\x00\x00s\x1a\x01\x00\xfb\x00\x00\x00y\x1d\x01\x00\xb5\x01\x00\x00u\x1e\x01\x00\xb5\x01\x00\x00+ \x01\x00\x18\x00\x00\x00\xe1!\x01\x00<\x00\x00\x00\xfa!\x01\x00=\x00\x00\x007\"\x01\x00\xc6\x00\x00\x00u\"\x01\x00g\x02\x00\x00<#\x01\x00.\x00\x00\x00\xa4%\x01\x00>\x03\x00\x00\xd3%\x01\x00g\x00\x00\x00\x12)\x01\x00Q\x00\x00\x00z)\x01\x00R\x00\x00\x00\xcc)\x01\x00\"\x00\x00\x00\x1f*\x01\x00X\x02\x00\x00B*\x01\x004\x00\x00\x00\x9b,\x01\x00}\x00\x00\x00\xd0,\x01\x00k\x01\x00\x00N-\x01\x00\x83\a\x00\x00\xba.\x01\x00k\x01\x00\x00>6\x01\x00\x85\x00\x00\x00\xaa7\x01\x00\xee\x00\x00\x0008\x01\x00\xd9\x00\x00\x00\x1f9\x01\x00\x10\x05\x00\x00\xf99\x01\x00\x15\x05\x00\x00\n?\x01\x00\x1c\x00\x00\x00 
D\x01\x00\x1e\x00\x00\x00=D\x01\x00\x99\x02\x00\x00\\D\x01\x00\xbc\x01\x00\x00\xf6F\x01\x00\x9c\x01\x00\x00\xb3H\x01\x00s\x01\x00\x00PJ\x01\x00\t\x01\x00\x00\xc4K\x01\x00\xec\x01\x00\x00\xceL\x01\x00\x1c\x01\x00\x00\xbbN\x01\x00\xc3\x01\x00\x00\xd8O\x01\x00\x1b\x02\x00\x00\x9cQ\x01\x00\xc0\x00\x00\x00\xb8S\x01\x00\xe1\x02\x00\x00yT\x01\x00\x9f\x00\x00\x00[W\x01\x00X\x00\x00\x00\xfbW\x01\x00*\x02\x00\x00TX\x01\x00o\x00\x00\x00\u007fZ\x01\x00v\x00\x00\x00\xefZ\x01\x00\x01\x01\x00\x00f[\x01\x00v\x00\x00\x00h\\\x01\x00u\x00\x00\x00\xdf\\\x01\x00\xf5\x00\x00\x00U]\x01\x00~\x00\x00\x00K^\x01\x00k\x00\x00\x00\xca^\x01\x00\xc8\x01\x00\x006_\x01\x00#\x04\x00\x00\xff`\x01\x00;\x00\x00\x00#e\x01\x008\x00\x00\x00_e\x01\x001\x00\x00\x00\x98e\x01\x007\x00\x00\x00\xcae\x01\x00|\x02\x00\x00\x02f\x01\x00\xb4\x00\x00\x00\u007fh\x01\x00[\x00\x00\x004i\x01\x00J\x00\x00\x00\x90i\x01\x00a\x00\x00\x00\xdbi\x01\x00\xbd\x00\x00\x00=j\x01\x009\x00\x00\x00\xfbj\x01\x00\xc5\x00\x00\x005k\x01\x00\xae\x00\x00\x00\xfbk\x01\x00\xd7\x00\x00\x00\xaal\x01\x00c\x00\x00\x00\x82m\x01\x00%\x00\x00\x00\xe6m\x01\x00_\x00\x00\x00\fn\x01\x00\x1d\x00\x00\x00ln\x01\x00=\x00\x00\x00\x8an\x01\x00y\x00\x00\x00\xc8n\x01\x004\x00\x00\x00Bo\x01\x00-\x00\x00\x00wo\x01\x00\xa5\x00\x00\x00\xa5o\x01\x003\x00\x00\x00Kp\x01\x002\x00\x00\x00\u007fp\x01\x008\x00\x00\x00\xb2p\x01\x00\x1e\x00\x00\x00\xebp\x01\x00\x1a\x00\x00\x00\nq\x01\x009\x00\x00\x00%q\x01\x00\x13\x00\x00\x00_q\x01\x00\x1b\x00\x00\x00sq\x01\x00@\x00\x00\x00\x8fq\x01\x00,\x00\x00\x00\xd0q\x01\x00*\x00\x00\x00\xfdq\x01\x007\x00\x00\x00(r\x01\x00'\x00\x00\x00`r\x01\x00Q\x00\x00\x00\x88r\x01\x00.\x00\x00\x00\xdar\x01\x00=\x00\x00\x00\ts\x01\x00*\x00\x00\x00Gs\x01\x000\x00\x00\x00rs\x01\x00,\x00\x00\x00\xa3s\x01\x00\x1f\x00\x00\x00\xd0s\x01\x00]\x00\x00\x00\xf0s\x01\x00:\x00\x00\x00Nt\x01\x00=\x00\x00\x00\x89t\x01\x00\"\x00\x00\x00\xc7t\x01\x00?\x00\x00\x00\xeat\x01\x007\x00\x00\x00*u\x01\x00,\x00\x00\x00bu\x01\x00+\x00\x00\x00\x8fu\x01\x00$\x00\x00\x00\xbbu\x01\x00'\x00\x00\x00\xe0u\x01\x00:\x00\x00\x00\bv\x01\x00b\x00\x00\x00Cv\x01\x001\x00\x00\x00\xa6v\x01\x00-\x00\x00\x00\xd8v\x01\x00$\x00\x00\x00\x06w\x01\x00)\x00\x00\x00+w\x01\x006\x00\x00\x00Uw\x01\x00\x1d\x00\x00\x00\x8cw\x01\x00\x19\x00\x00\x00\xaaw\x01\x00 \x00\x00\x00\xc4w\x01\x00w\x00\x00\x00\xe5w\x01\x00(\x00\x00\x00]x\x01\x00\x16\x00\x00\x00\x86x\x01\x00p\x00\x00\x00\x9dx\x01\x00h\x00\x00\x00\x0ey\x01\x00\x9b\x00\x00\x00wy\x01\x00\x97\x00\x00\x00\x13z\x01\x00\xa9\x00\x00\x00\xabz\x01\x00\x1b\x00\x00\x00U{\x01\x00\x18\x00\x00\x00q{\x01\x00\x1a\x00\x00\x00\x8a{\x01\x00$\x00\x00\x00\xa5{\x01\x00\x1d\x00\x00\x00\xca{\x01\x00%\x00\x00\x00\xe8{\x01\x00a\x00\x00\x00\x0e|\x01\x00s\x00\x00\x00p|\x01\x00B\x00\x00\x00\xe4|\x01\x00]\x00\x00\x00'}\x01\x00+\x00\x00\x00\x85}\x01\x00+\x00\x00\x00\xb1}\x01\x006\x00\x00\x00\xdd}\x01\x00;\x00\x00\x00\x14~\x01\x00q\x00\x00\x00P~\x01\x00/\x00\x00\x00\xc2~\x01\x001\x00\x00\x00\xf2~\x01\x00'\x00\x00\x00$\u007f\x01\x00'\x00\x00\x00L\u007f\x01\x00\x18\x00\x00\x00t\u007f\x01\x00&\x00\x00\x00\x8d\u007f\x01\x00%\x00\x00\x00\xb4\u007f\x01\x00(\x00\x00\x00\xda\u007f\x01\x00#\x00\x00\x00\x03\x80\x01\x00K\x00\x00\x00'\x80\x01\x00 
\x00\x00\x00s\x80\x01\x00_\x00\x00\x00\x94\x80\x01\x00-\x00\x00\x00\xf4\x80\x01\x007\x00\x00\x00\"\x81\x01\x00:\x00\x00\x00Z\x81\x01\x004\x00\x00\x00\x95\x81\x01\x00:\x00\x00\x00\u0281\x01\x00:\x00\x00\x00\x05\x82\x01\x009\x00\x00\x00@\x82\x01\x00\x1e\x00\x00\x00z\x82\x01\x00\x1a\x00\x00\x00\x99\x82\x01\x00c\x00\x00\x00\xb4\x82\x01\x00#\x00\x00\x00\x18\x83\x01\x00\x82\x00\x00\x00<\x83\x01\x00\x99\x00\x00\x00\xbf\x83\x01\x00H\x00\x00\x00Y\x84\x01\x00&\x00\x00\x00\xa2\x84\x01\x00e\x00\x00\x00\u0244\x01\x00z\x00\x00\x00/\x85\x01\x00J\x00\x00\x00\xaa\x85\x01\x00\xf5\x00\x00\x00\xf5\x85\x01\x00X\x00\x00\x00\xeb\x86\x01\x00I\x00\x00\x00D\x87\x01\x00a\x00\x00\x00\x8e\x87\x01\x00w\x00\x00\x00\xf0\x87\x01\x00\xcf\x00\x00\x00h\x88\x01\x00\xd3\x00\x00\x008\x89\x01\x00/\x01\x00\x00\f\x8a\x01\x00\x1c\x00\x00\x00<\x8b\x01\x00X\x00\x00\x00Y\x8b\x01\x00*\x00\x00\x00\xb2\x8b\x01\x00:\x00\x00\x00\u074b\x01\x009\x00\x00\x00\x18\x8c\x01\x00\x1e\x00\x00\x00R\x8c\x01\x00=\x00\x00\x00q\x8c\x01\x003\x00\x00\x00\xaf\x8c\x01\x00'\x00\x00\x00\xe3\x8c\x01\x00&\x00\x00\x00\v\x8d\x01\x00+\x00\x00\x002\x8d\x01\x00G\x00\x00\x00^\x8d\x01\x00*\x00\x00\x00\xa6\x8d\x01\x00v\x00\x00\x00\u044d\x01\x00\x13\x00\x00\x00H\x8e\x01\x00\x18\x00\x00\x00\\\x8e\x01\x00/\x00\x00\x00u\x8e\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00^\x00\x00\x00\\\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00F\x00\x00\x00\xc4\x00\x00\x00\x0f\x00\x00\x00\xc3\x00\x00\x00\x00\x00\x00\x00-\x00\x00\x00\x00\x00\x00\x00\x86\x00\x00\x00\xeb\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x001\x00\x00\x00o\x00\x00\x00}\x00\x00\x00\x00\x00\x00\x00J\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x98\x00\x00\x00U\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdb\x00\x00\x00\x17\x00\x00\x00u\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x8a\x00\x00\x00\x90\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc9\x00\x00\x00\xb7\x00\x00\x00\xd7\x00\x00\x00*\x00\x00\x00\x99\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x84\x00\x00\x00\x9c\x00\x00\x00\xe6\x00\x00\x00\x9d\x00\x00\x00\xc5\x00\x00\x00\xd9\x00\x00\x00\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00\x00\xcd\x00\x00\x00\xcb\x00\x00\x00y\x00\x00\x00\x97\x00\x00\x00\xba\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x93\x00\x00\x00\xad\x00\x00\x00\xe1\x00\x00\x00\xa6\x00\x00\x00\xd0\x00\x00\x00r\x00\x00\x00+\x00\x00\x006\x00\x00\x00\x00\x00\x00\x00\xa5\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00h\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x00\x00\xd1\x00\x00\x00\xde\x00\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00\xe7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00z\x00\x00\x00/\x00\x00\x00V\x00\x00\x00\x8d\x00\x00\x00\xe3\x00\x00\x00!\x00\x00\x00~\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd3\x00\x00\x00\x88\x00\x00\x00l\x00\x00\x00s\x00\x00\x00g\x00\x00\x00\x05\x00\x00\x00\xc6\x00\x00\x00#\x00\x00\x00\x9f\x00\x00\x00\x00\x00\x00\x00\xb1\x00\x00\x00\x00\x00\x00\x00\xc2\x00\x00\x00\x13\x00\x00\x00S\x00\x00\x00G\x00\x00\x00$\x00\x00\x00\xc1\x00\x00\x00\xb5\x00\x00\x00X\x00\x00\x00m\x00\x00\x00\t\x00\x00\x00x\x00\x00\x00\xb8\x00\x00\x00\xbd\x00\x00\x00k\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00E\x00\x00\x00\xbf\x00\x00\x00\xbc\x00\x00\x00\x00\x00\x00\x00:\x00\x00\x00\x82\x00\x00\x00\x81\x00\x00\x00&\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\x00[\x00\x00\x00I\x00\x00\x00e\x00\x00\x00\x04\x00\x00\x00>\x00\x00\x00\b\x00\x00\x00\x94\x00\x00\x00\x8f\x00\x00\x00\xce\x00\x00\x00?\x00\x00\x00Y\x00\x00\x00\xda\x00\x00\x00\x15\x00\x00\x00\x00\x00\x
00\x00'\x00\x00\x004\x00\x00\x00\xcc\x00\x00\x00\f\x00\x00\x005\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00\xbb\x00\x00\x00\x00\x00\x00\x00\xa9\x00\x00\x00\x9e\x00\x00\x00\x00\x00\x00\x00\xe5\x00\x00\x00\x00\x00\x00\x00O\x00\x00\x00 \x00\x00\x00)\x00\x00\x00\xcf\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x00\x00Z\x00\x00\x00\"\x00\x00\x00\x00\x00\x00\x00v\x00\x00\x00]\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00a\x00\x00\x00j\x00\x00\x008\x00\x00\x00\xa3\x00\x00\x00q\x00\x00\x00t\x00\x00\x00_\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\v\x00\x00\x00@\x00\x00\x00\xd2\x00\x00\x00,\x00\x00\x00\x00\x00\x00\x00\x85\x00\x00\x00\x00\x00\x00\x00\x92\x00\x00\x00\x00\x00\x00\x00\xc8\x00\x00\x00\x95\x00\x00\x00\x06\x00\x00\x00\xa8\x00\x00\x00\xae\x00\x00\x00\xa1\x00\x00\x00\x00\x00\x00\x00\x91\x00\x00\x00\x0e\x00\x00\x00{\x00\x00\x00\xa7\x00\x00\x00\x00\x00\x00\x00\xb6\x00\x00\x00i\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd5\x00\x00\x00L\x00\x00\x00\x00\x00\x00\x00\xea\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00w\x00\x00\x00\x12\x00\x00\x00=\x00\x00\x00\xaf\x00\x00\x00\a\x00\x00\x00\xdf\x00\x00\x00\xc0\x00\x00\x00N\x00\x00\x00%\x00\x00\x009\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00.\x00\x00\x00\x00\x00\x00\x00\u007f\x00\x00\x00\xbe\x00\x00\x00\xe2\x00\x00\x00\x00\x00\x00\x00P\x00\x00\x00\xb3\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00R\x00\x00\x00D\x00\x00\x00B\x00\x00\x00n\x00\x00\x00\x00\x00\x00\x00\xd6\x00\x00\x00\x83\x00\x00\x00\n\x00\x00\x00W\x00\x00\x00\x14\x00\x00\x00Q\x00\x00\x00\xd4\x00\x00\x00d\x00\x00\x00\xac\x00\x00\x00\x16\x00\x00\x00\x96\x00\x00\x00K\x00\x00\x002\x00\x00\x00\x1a\x00\x00\x00\xb4\x00\x00\x00f\x00\x00\x00\xa2\x00\x00\x00\xe8\x00\x00\x00\x02\x00\x00\x00A\x00\x00\x00\xe4\x00\x00\x00\x8c\x00\x00\x00\x9a\x00\x00\x00`\x00\x00\x00\xab\x00\x00\x00M\x00\x00\x007\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9b\x00\x00\x00\x89\x00\x00\x00\x00\x00\x00\x00\xdd\x00\x00\x00\x8e\x00\x00\x00\xca\x00\x00\x00H\x00\x00\x00\x00\x00\x00\x00\xb2\x00\x00\x00\x00\x00\x00\x00\x1c\x00\x00\x00\xb0\x00\x00\x00\x00\x00\x00\x00|\x00\x00\x003\x00\x00\x00T\x00\x00\x00\x87\x00\x00\x00b\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\xaa\x00\x00\x00\xa4\x00\x00\x00\x00\x00\x00\x00p\x00\x00\x00\xc7\x00\x00\x00\x8b\x00\x00\x00\x00\n\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the cluster-admin ClusterRole\n\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1\x00\n\t\t # Create a RoleBinding for user1, user2, and group1 using the admin ClusterRole\n\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1\x00\n\t\t # Create a new configmap named my-config based on folder bar\n\t\t kubectl create configmap my-config --from-file=path/to/bar\n\n\t\t # Create a new configmap named my-config with specified keys instead of file basenames on disk\n\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt\n\n\t\t # Create a new configmap named my-config with key1=config1 and key2=config2\n\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2\x00\n\t\t # If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:\n\t\t kubectl create secret docker-registry my-secret 
--docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\x00\n\t\t # Show metrics for all nodes\n\t\t kubectl top node\n\n\t\t # Show metrics for a given node\n\t\t kubectl top node NODE_NAME\x00\n\t\t# Apply the configuration in pod.json to a pod.\n\t\tkubectl apply -f ./pod.json\n\n\t\t# Apply the JSON passed into stdin to a pod.\n\t\tcat pod.json | kubectl apply -f -\n\n\t\t# Note: --prune is still in Alpha\n\t\t# Apply the configuration in manifest.yaml that matches label app=nginx and delete all the other resources that are not in the file and match label app=nginx.\n\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n\n\t\t# Apply the configuration in manifest.yaml and delete all the other configmaps that are not in the file.\n\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/ConfigMap\x00\n\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and 10, no target CPU utilization specified so a default autoscaling policy will be used:\n\t\tkubectl autoscale deployment foo --min=2 --max=10\n\n\t\t# Auto scale a replication controller \"foo\", with the number of pods between 1 and 5, target CPU utilization at 80%:\n\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80\x00\n\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n\t\tkubectl convert -f pod.yaml\n\n\t\t# Convert the live state of the resource specified by 'pod.yaml' to the latest version\n\t\t# and print to stdout in json format.\n\t\tkubectl convert -f pod.yaml --local -o json\n\n\t\t# Convert all files under current directory to latest version and create them all.\n\t\tkubectl convert -f . | kubectl create -f -\x00\n\t\t# Create a ClusterRole named \"pod-reader\" that allows user to perform \"get\", \"watch\" and \"list\" on pods\n\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --resource=pods\n\n\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName specified\n\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --resource=pods --resource-name=readablepod\x00\n\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get\", \"watch\" and \"list\" on pods\n\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods\n\n\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --resource=pods --resource-name=readablepod\x00\n\t\t# Create a new resourcequota named my-quota\n\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10\n\n\t\t# Create a new resourcequota named best-effort\n\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort\x00\n\t\t# Create a pod disruption budget named my-pdb that will select all pods with the app=rails label\n\t\t# and require at least one of them being available at any point in time.\n\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-available=1\n\n\t\t# Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label\n\t\t# and require at least half of the pods selected to be available at any point in time.\n\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%\x00\n\t\t# Create a pod using the data in pod.json.\n\t\tkubectl create -f ./pod.json\n\n\t\t# Create a pod based on the JSON passed into stdin.\n\t\tcat pod.json | 
kubectl create -f -\n\n\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API format then create the resource using the edited data.\n\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o json\x00\n\t\t# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose rc nginx --port=80 --target-port=8000\n\n\t\t# Create a service for a replication controller identified by type and name specified in \"nginx-controller.yaml\", which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n\n\t\t# Create a service for a pod valid-pod, which serves on port 444 with the name \"frontend\"\n\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n\n\t\t# Create a second service based on the above service, exposing the container port 8443 as port 443 with the name \"nginx-https\"\n\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-https\n\n\t\t# Create a service for a replicated streaming application on port 4100 balancing UDP traffic and named 'video-stream'.\n\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream\n\n\t\t# Create a service for a replicated nginx using replica set, which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose rs nginx --port=80 --target-port=8000\n\n\t\t# Create a service for an nginx deployment, which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose deployment nginx --port=80 --target-port=8000\x00\n\t\t# Delete a pod using the type and name specified in pod.json.\n\t\tkubectl delete -f ./pod.json\n\n\t\t# Delete a pod based on the type and name in the JSON passed into stdin.\n\t\tcat pod.json | kubectl delete -f -\n\n\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n\t\tkubectl delete pod,service baz foo\n\n\t\t# Delete pods and services with label name=myLabel.\n\t\tkubectl delete pods,services -l name=myLabel\n\n\t\t# Delete a pod with minimal delay\n\t\tkubectl delete pod foo --now\n\n\t\t# Force delete a pod on a dead node\n\t\tkubectl delete pod foo --grace-period=0 --force\n\n\t\t# Delete all pods\n\t\tkubectl delete pods --all\x00\n\t\t# Describe a node\n\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n\n\t\t# Describe a pod\n\t\tkubectl describe pods/nginx\n\n\t\t# Describe a pod identified by type and name in \"pod.json\"\n\t\tkubectl describe -f pod.json\n\n\t\t# Describe all pods\n\t\tkubectl describe pods\n\n\t\t# Describe pods by label name=myLabel\n\t\tkubectl describe po -l name=myLabel\n\n\t\t# Describe all pods managed by the 'frontend' replication controller (rc-created pods\n\t\t# get the name of the rc as a prefix in the pod the name).\n\t\tkubectl describe pods frontend\x00\n\t\t# Drain node \"foo\", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n\t\t$ kubectl drain foo --force\n\n\t\t# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use a grace period of 15 minutes.\n\t\t$ kubectl drain foo --grace-period=900\x00\n\t\t# Edit the service named 'docker-registry':\n\t\tkubectl edit svc/docker-registry\n\n\t\t# Use an alternative editor\n\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n\n\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n\t\tkubectl edit 
job.v1.batch/myjob -o json\n\n\t\t# Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation:\n\t\tkubectl edit deployment/mydeployment -o yaml --save-config\x00\n\t\t# Get output from running 'date' from pod 123456-7890, using the first container by default\n\t\tkubectl exec 123456-7890 date\n\n\t\t# Get output from running 'date' in ruby-container from pod 123456-7890\n\t\tkubectl exec 123456-7890 -c ruby-container date\n\n\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-7890\n\t\t# and sends stdout/stderr from 'bash' back to the client\n\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il\x00\n\t\t# Get output from running pod 123456-7890, using the first container by default\n\t\tkubectl attach 123456-7890\n\n\t\t# Get output from ruby-container from pod 123456-7890\n\t\tkubectl attach 123456-7890 -c ruby-container\n\n\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-7890\n\t\t# and sends stdout/stderr from 'bash' back to the client\n\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n\n\t\t# Get output from the first pod of a ReplicaSet named nginx\n\t\tkubectl attach rs/nginx\n\t\t\x00\n\t\t# Get the documentation of the resource and its fields\n\t\tkubectl explain pods\n\n\t\t# Get the documentation of a specific field of a resource\n\t\tkubectl explain pods.spec.containers\x00\n\t\t# Install bash completion on a Mac using homebrew\n\t\tbrew install bash-completion\n\t\tprintf \"\n# Bash completion support\nsource $(brew --prefix)/etc/bash_completion\n\" >> $HOME/.bash_profile\n\t\tsource $HOME/.bash_profile\n\n\t\t# Load the kubectl completion code for bash into the current shell\n\t\tsource <(kubectl completion bash)\n\n\t\t# Write bash completion code to a file and source if from .bash_profile\n\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n\t\tprintf \"\n# Kubectl shell completion\nsource '$HOME/.kube/completion.bash.inc'\n\" >> $HOME/.bash_profile\n\t\tsource $HOME/.bash_profile\n\n\t\t# Load the kubectl completion code for zsh[1] into the current shell\n\t\tsource <(kubectl completion zsh)\x00\n\t\t# List all pods in ps output format.\n\t\tkubectl get pods\n\n\t\t# List all pods in ps output format with more information (such as node name).\n\t\tkubectl get pods -o wide\n\n\t\t# List a single replication controller with specified NAME in ps output format.\n\t\tkubectl get replicationcontroller web\n\n\t\t# List a single pod in JSON output format.\n\t\tkubectl get -o json pod web-pod-13je7\n\n\t\t# List a pod identified by type and name specified in \"pod.yaml\" in JSON output format.\n\t\tkubectl get -f pod.yaml -o json\n\n\t\t# Return only the phase value of the specified pod.\n\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n\n\t\t# List all replication controllers and services together in ps output format.\n\t\tkubectl get rc,services\n\n\t\t# List one or more resources by their type and names.\n\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n\n\t\t# List all resources with different types.\n\t\tkubectl get all\x00\n\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod\n\t\tkubectl port-forward mypod 5000 6000\n\n\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n\t\tkubectl port-forward mypod 8888:5000\n\n\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n\t\tkubectl port-forward mypod :5000\n\n\t\t# Listen on a 
random port locally, forwarding to 5000 in the pod\n\t\tkubectl port-forward mypod 0:5000\x00\n\t\t# Mark node \"foo\" as schedulable.\n\t\t$ kubectl uncordon foo\x00\n\t\t# Mark node \"foo\" as unschedulable.\n\t\tkubectl cordon foo\x00\n\t\t# Partially update a node using strategic merge patch\n\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n\n\t\t# Partially update a node identified by the type and name specified in \"node.json\" using strategic merge patch\n\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n\n\t\t# Update a container's image; spec.containers[*].name is required because it's a merge key\n\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n\n\t\t# Update a container's image using a json patch with positional arrays\n\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'\x00\n\t\t# Print flags inherited by all commands\n\t\tkubectl options\x00\n\t\t# Print the address of the master and cluster services\n\t\tkubectl cluster-info\x00\n\t\t# Print the client and server versions for the current context\n\t\tkubectl version\x00\n\t\t# Print the supported API versions\n\t\tkubectl api-versions\x00\n\t\t# Replace a pod using the data in pod.json.\n\t\tkubectl replace -f ./pod.json\n\n\t\t# Replace a pod based on the JSON passed into stdin.\n\t\tcat pod.json | kubectl replace -f -\n\n\t\t# Update a single-container pod's image version (tag) to v4\n\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/\x01:v4/' | kubectl replace -f -\n\n\t\t# Force replace, delete and then re-create the resource\n\t\tkubectl replace --force -f ./pod.json\x00\n\t\t# Return snapshot logs from pod nginx with only one container\n\t\tkubectl logs nginx\n\n\t\t# Return snapshot logs for the pods defined by label app=nginx\n\t\tkubectl logs -lapp=nginx\n\n\t\t# Return snapshot of previous terminated ruby container logs from pod web-1\n\t\tkubectl logs -p -c ruby web-1\n\n\t\t# Begin streaming the logs of the ruby container in pod web-1\n\t\tkubectl logs -f -c ruby web-1\n\n\t\t# Display only the most recent 20 lines of output in pod nginx\n\t\tkubectl logs --tail=20 nginx\n\n\t\t# Show all logs from pod nginx written in the last hour\n\t\tkubectl logs --since=1h nginx\n\n\t\t# Return snapshot logs from first container of a job named hello\n\t\tkubectl logs job/hello\n\n\t\t# Return snapshot logs from container nginx-1 of a deployment named nginx\n\t\tkubectl logs deployment/nginx -c nginx-1\x00\n\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static content from ./local/www/\n\t\tkubectl proxy --port=8011 --www=./local/www/\n\n\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n\t\t# The chosen port for the server will be output to stdout.\n\t\tkubectl proxy --port=0\n\n\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api\n\t\t# This makes e.g. 
the pods api available at localhost:8001/k8s-api/v1/pods/\n\t\tkubectl proxy --api-prefix=/k8s-api\x00\n\t\t# Scale a replicaset named 'foo' to 3.\n\t\tkubectl scale --replicas=3 rs/foo\n\n\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" to 3.\n\t\tkubectl scale --replicas=3 -f foo.yaml\n\n\t\t# If the deployment named mysql's current size is 2, scale mysql to 3.\n\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n\n\t\t# Scale multiple replication controllers.\n\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n\n\t\t# Scale job named 'cron' to 3.\n\t\tkubectl scale --replicas=3 job/cron\x00\n\t\t# Set the last-applied-configuration of a resource to match the contents of a file.\n\t\tkubectl apply set-last-applied -f deploy.yaml\n\n\t\t# Execute set-last-applied against each configuration file in a directory.\n\t\tkubectl apply set-last-applied -f path/\n\n\t\t# Set the last-applied-configuration of a resource to match the contents of a file, will create the annotation if it does not already exist.\n\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n\t\t\x00\n\t\t# Show metrics for all pods in the default namespace\n\t\tkubectl top pod\n\n\t\t# Show metrics for all pods in the given namespace\n\t\tkubectl top pod --namespace=NAMESPACE\n\n\t\t# Show metrics for a given pod and its containers\n\t\tkubectl top pod POD_NAME --containers\n\n\t\t# Show metrics for the pods defined by label name=myLabel\n\t\tkubectl top pod -l name=myLabel\x00\n\t\t# Shut down foo.\n\t\tkubectl stop replicationcontroller foo\n\n\t\t# Stop pods and services with label name=myLabel.\n\t\tkubectl stop pods,services -l name=myLabel\n\n\t\t# Shut down the service defined in service.json\n\t\tkubectl stop -f service.json\n\n\t\t# Shut down all resources in the path/to/resources directory\n\t\tkubectl stop -f path/to/resources\x00\n\t\t# Start a single instance of nginx.\n\t\tkubectl run nginx --image=nginx\n\n\t\t# Start a single instance of hazelcast and let the container expose port 5701 .\n\t\tkubectl run hazelcast --image=hazelcast --port=5701\n\n\t\t# Start a single instance of hazelcast and set environment variables \"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n\n\t\t# Start a replicated instance of nginx.\n\t\tkubectl run nginx --image=nginx --replicas=5\n\n\t\t# Dry run. Print the corresponding API objects without creating them.\n\t\tkubectl run nginx --image=nginx --dry-run\n\n\t\t# Start a single instance of nginx, but overload the spec of the deployment with a partial set of values parsed from JSON.\n\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", \"spec\": { ... } }'\n\n\t\t# Start a pod of busybox and keep it in the foreground, don't restart it if it exits.\n\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n\n\t\t# Start the nginx container using the default command, but use custom arguments (arg1 .. argN) for that command.\n\t\tkubectl run nginx --image=nginx -- ... \n\n\t\t# Start the nginx container using a different command and custom arguments.\n\t\tkubectl run nginx --image=nginx --command -- ... 
\n\n\t\t# Start the perl container to compute \u03c0 to 2000 places and print it out.\n\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'\n\n\t\t# Start the cron job to compute \u03c0 to 2000 places and print it out every 5 minutes.\n\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'\x00\n\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-user' and effect 'NoSchedule'.\n\t\t# If a taint with that key and effect already exists, its value is replaced as specified.\n\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n\n\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect 'NoSchedule' if one exists.\n\t\tkubectl taint nodes foo dedicated:NoSchedule-\n\n\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n\t\tkubectl taint nodes foo dedicated-\x00\n\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n\t\tkubectl label pods foo unhealthy=true\n\n\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.\n\t\tkubectl label --overwrite pods foo status=unhealthy\n\n\t\t# Update all pods in the namespace\n\t\tkubectl label pods --all status=unhealthy\n\n\t\t# Update a pod identified by the type and name in \"pod.json\"\n\t\tkubectl label -f pod.json status=unhealthy\n\n\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n\t\tkubectl label pods foo status=unhealthy --resource-version=1\n\n\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n\t\t# Does not require the --overwrite flag.\n\t\tkubectl label pods foo bar-\x00\n\t\t# Update pods of frontend-v1 using new replication controller data in frontend-v2.json.\n\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n\n\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n\n\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the image, and switching the\n\t\t# name of the replication controller.\n\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n\n\t\t# Update the pods of frontend by just changing the image, and keeping the old name.\n\t\tkubectl rolling-update frontend --image=image:v2\n\n\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 to frontend-v2).\n\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback\x00\n\t\t# View the last-applied-configuration annotations by type/name in YAML.\n\t\tkubectl apply view-last-applied deployment/nginx\n\n\t\t# View the last-applied-configuration annotations by file in JSON\n\t\tkubectl apply view-last-applied -f deploy.yaml -o json\x00\n\t\tApply a configuration to a resource by filename or stdin.\n\t\tThis resource will be created if it doesn't exist yet.\n\t\tTo use 'apply', always create the resource initially with either 'apply' or 'create --save-config'.\n\n\t\tJSON and YAML formats are accepted.\n\n\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not use unless you are aware of what the current state is. See https://issues.k8s.io/34274.\x00\n\t\tConvert config files between different API versions. Both YAML\n\t\tand JSON formats are accepted.\n\n\t\tThe command takes filename, directory, or URL as input, and convert it into format\n\t\tof version specified by --output-version flag. 
If target version is not specified or\n\t\tnot supported, convert to latest version.\n\n\t\tThe default output will be printed to stdout in YAML format. One can use -o option\n\t\tto change to output destination.\x00\n\t\tCreate a ClusterRole.\x00\n\t\tCreate a ClusterRoleBinding for a particular ClusterRole.\x00\n\t\tCreate a RoleBinding for a particular Role or ClusterRole.\x00\n\t\tCreate a TLS secret from the given public/private key pair.\n\n\t\tThe public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.\x00\n\t\tCreate a configmap based on a file, directory, or specified literal value.\n\n\t\tA single configmap may package one or more key/value pairs.\n\n\t\tWhen creating a configmap based on a file, the key will default to the basename of the file, and the value will\n\t\tdefault to the file content. If the basename is an invalid key, you may specify an alternate key.\n\n\t\tWhen creating a configmap based on a directory, each file whose basename is a valid key in the directory will be\n\t\tpackaged into the configmap. Any directory entries except regular files are ignored (e.g. subdirectories,\n\t\tsymlinks, devices, pipes, etc).\x00\n\t\tCreate a namespace with the specified name.\x00\n\t\tCreate a new secret for use with Docker registries.\n\n\t\tDockercfg secrets are used to authenticate against Docker registries.\n\n\t\tWhen using the Docker command line to push images, you can authenticate to a given registry by running\n\n\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n\n That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to\n\t\tauthenticate to the registry. The email address is optional.\n\n\t\tWhen creating applications, you may have a Docker registry that requires authentication. In order for the\n\t\tnodes to pull images on your behalf, they have to have the credentials. You can provide this information\n\t\tby creating a dockercfg secret and attaching it to your service account.\x00\n\t\tCreate a pod disruption budget with the specified name, selector, and desired minimum available pods\x00\n\t\tCreate a resource by filename or stdin.\n\n\t\tJSON and YAML formats are accepted.\x00\n\t\tCreate a resourcequota with the specified name, hard limits and optional scopes\x00\n\t\tCreate a role with single rule.\x00\n\t\tCreate a secret based on a file, directory, or specified literal value.\n\n\t\tA single secret may package one or more key/value pairs.\n\n\t\tWhen creating a secret based on a file, the key will default to the basename of the file, and the value will\n\t\tdefault to the file content. If the basename is an invalid key, you may specify an alternate key.\n\n\t\tWhen creating a secret based on a directory, each file whose basename is a valid key in the directory will be\n\t\tpackaged into the secret. Any directory entries except regular files are ignored (e.g. 
subdirectories,\n\t\tsymlinks, devices, pipes, etc).\x00\n\t\tCreate a service account with the specified name.\x00\n\t\tCreate and run a particular image, possibly replicated.\n\n\t\tCreates a deployment or job to manage the created container(s).\x00\n\t\tCreates an autoscaler that automatically chooses and sets the number of pods that run in a kubernetes cluster.\n\n\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name and creates an autoscaler that uses the given resource as a reference.\n\t\tAn autoscaler can automatically increase or decrease number of pods deployed within the system as needed.\x00\n\t\tDelete resources by filenames, stdin, resources and names, or by resources and label selector.\n\n\t\tJSON and YAML formats are accepted. Only one type of the arguments may be specified: filenames,\n\t\tresources and names, or resources and label selector.\n\n\t\tSome resources, such as pods, support graceful deletion. These resources define a default period\n\t\tbefore they are forcibly terminated (the grace period) but you may override that value with\n\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. Because these resources often\n\t\trepresent entities in the cluster, deletion may not be acknowledged immediately. If the node\n\t\thosting a pod is down or cannot reach the API server, termination may take significantly longer\n\t\tthan the grace period. To force delete a resource,\tyou must pass a grace\tperiod of 0 and specify\n\t\tthe --force flag.\n\n\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the pod's processes have been\n\t\tterminated, which can leave those processes running until the node detects the deletion and\n\t\tcompletes graceful deletion. If your processes use shared storage or talk to a remote API and\n\t\tdepend on the name of the pod to identify themselves, force deleting those pods may result in\n\t\tmultiple processes running on different machines using the same identification which may lead\n\t\tto data corruption or inconsistency. 
Only force delete pods when you are sure the pod is\n\t\tterminated, or if your application can tolerate multiple copies of the same pod running at once.\n\t\tAlso, if you force delete pods the scheduler may place new pods on those nodes before the node\n\t\thas released those resources and causing those pods to be evicted immediately.\n\n\t\tNote that the delete command does NOT do resource version checks, so if someone\n\t\tsubmits an update to a resource right when you submit a delete, their update\n\t\twill be lost along with the rest of the resource.\x00\n\t\tDeprecated: Gracefully shut down a resource by name or filename.\n\n\t\tThe stop command is deprecated, all its functionalities are covered by delete command.\n\t\tSee 'kubectl delete --help' for more details.\n\n\t\tAttempts to shut down and delete a resource that supports graceful termination.\n\t\tIf the resource is scalable it will be scaled to 0 before deletion.\x00\n\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n\n\t\tThe top-node command allows you to see the resource consumption of nodes.\x00\n\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n\n\t\tThe 'top pod' command allows you to see the resource consumption of pods.\n\n\t\tDue to the metrics pipeline delay, they may be unavailable for a few minutes\n\t\tsince pod creation.\x00\n\t\tDisplay Resource (CPU/Memory/Storage) usage.\n\n\t\tThe top command allows you to see the resource consumption for nodes or pods.\n\n\t\tThis command requires Heapster to be correctly configured and working on the server. \x00\n\t\tDrain node in preparation for maintenance.\n\n\t\tThe given node will be marked unschedulable to prevent new pods from arriving.\n\t\t'drain' evicts the pods if the APIServer supports eviction\n\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use normal DELETE\n\t\tto delete the pods.\n\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot be deleted through\n\t\tthe API server). If there are DaemonSet-managed pods, drain will not proceed\n\t\twithout --ignore-daemonsets, and regardless it will not delete any\n\t\tDaemonSet-managed pods, because those pods would be immediately replaced by the\n\t\tDaemonSet controller, which ignores unschedulable markings. If there are any\n\t\tpods that are neither mirror pods nor managed by ReplicationController,\n\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete any pods unless you\n\t\tuse --force. --force will also allow deletion to proceed if the managing resource of one\n\t\tor more pods is missing.\n\n\t\t'drain' waits for graceful termination. You should not operate on the machine until\n\t\tthe command completes.\n\n\t\tWhen you are ready to put the node back into service, use kubectl uncordon, which\n\t\twill make the node schedulable again.\n\n\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)\x00\n\t\tEdit a resource from the default editor.\n\n\t\tThe edit command allows you to directly edit any API resource you can retrieve via the\n\t\tcommand line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR\n\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for Windows.\n\t\tYou can edit multiple objects, although changes are applied one at a time. 
The command\n\t\taccepts filenames as well as command line arguments, although the files you point to must\n\t\tbe previously saved versions of resources.\n\n\t\tEditing is done with the API version used to fetch the resource.\n\t\tTo edit using a specific API version, fully-qualify the resource, version, and group.\n\n\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n\n\t\tThe flag --windows-line-endings can be used to force Windows line endings,\n\t\totherwise the default for your operating system will be used.\n\n\t\tIn the event an error occurs while updating, a temporary file will be created on disk\n\t\tthat contains your unapplied changes. The most common error when updating a resource\n\t\tis another editor changing the resource on the server. When this occurs, you will have\n\t\tto apply your changes to the newer version of the resource, or update your temporary\n\t\tsaved copy to include the latest resource version.\x00\n\t\tMark node as schedulable.\x00\n\t\tMark node as unschedulable.\x00\n\t\tOutput shell completion code for the specified shell (bash or zsh).\n\t\tThe shell code must be evaluated to provide interactive\n\t\tcompletion of kubectl commands. This can be done by sourcing it from\n\t\tthe .bash_profile.\n\n\t\tNote: this requires the bash-completion framework, which is not installed\n\t\tby default on Mac. This can be installed by using homebrew:\n\n\t\t $ brew install bash-completion\n\n\t\tOnce installed, bash_completion must be evaluated. This can be done by adding the\n\t\tfollowing line to the .bash_profile\n\n\t\t $ source $(brew --prefix)/etc/bash_completion\n\n\t\tNote for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2\x00\n\t\tPerform a rolling update of the given ReplicationController.\n\n\t\tReplaces the specified replication controller with a new replication controller by updating one pod at a time to use the\n\t\tnew PodTemplate. The new-controller.json must specify the same namespace as the\n\t\texisting replication controller and overwrite at least one (common) label in its replicaSelector.\n\n\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)\x00\n\t\tReplace a resource by filename or stdin.\n\n\t\tJSON and YAML formats are accepted. If replacing an existing resource, the\n\t\tcomplete resource spec must be provided. 
This can be obtained by\n\n\t\t $ kubectl get TYPE NAME -o yaml\n\n\t\tPlease refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html to find if a field is mutable.\x00\n\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, or Job.\n\n\t\tScale also allows users to specify one or more preconditions for the scale action.\n\n\t\tIf --current-replicas or --resource-version is specified, it is validated before the\n\t\tscale is attempted, and it is guaranteed that the precondition holds true when the\n\t\tscale is sent to the server.\x00\n\t\tSet the latest last-applied-configuration annotations by setting it to match the contents of a file.\n\t\tThis results in the last-applied-configuration being updated as though 'kubectl apply -f ' was run,\n\t\twithout updating any other parts of the object.\x00\n\t\tTo proxy all of the kubernetes api and nothing else, use:\n\n\t\t $ kubectl proxy --api-prefix=/\n\n\t\tTo proxy only part of the kubernetes api and also some static files:\n\n\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n\n\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n\n\t\tTo proxy the entire kubernetes api at a different root, use:\n\n\t\t $ kubectl proxy --api-prefix=/custom/\n\n\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'\x00\n\t\tUpdate field(s) of a resource using strategic merge patch\n\n\t\tJSON and YAML formats are accepted.\n\n\t\tPlease refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html to find if a field is mutable.\x00\n\t\tUpdate the labels on a resource.\n\n\t\t* A label must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[1]d characters.\n\t\t* If --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error.\n\t\t* If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.\x00\n\t\tUpdate the taints on one or more nodes.\n\n\t\t* A taint consists of a key, value, and effect. As an argument here, it is expressed as key=value:effect.\n\t\t* The key must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[1]d characters.\n\t\t* The value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[2]d characters.\n\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n\t\t* Currently taint can only apply to node.\x00\n\t\tView the latest last-applied-configuration annotations by type/name or file.\n\n\t\tThe default output will be printed to stdout in YAML format. One can use -o option\n\t\tto change output format.\x00\n\t # !!!Important Note!!!\n\t # Requires that the 'tar' binary is present in your container\n\t # image. 
If 'tar' is not present, 'kubectl cp' will fail.\n\n\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace\n\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n\n # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container\n\t\tkubectl cp /tmp/foo :/tmp/bar -c \n\n\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace \n\t\tkubectl cp /tmp/foo /:/tmp/bar\n\n\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n\t\tkubectl cp /:/tmp/foo /tmp/bar\x00\n\t # Create a new TLS secret named tls-secret with the given key pair:\n\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key\x00\n\t # Create a new namespace named my-namespace\n\t kubectl create namespace my-namespace\x00\n\t # Create a new secret named my-secret with keys for each file in folder bar\n\t kubectl create secret generic my-secret --from-file=path/to/bar\n\n\t # Create a new secret named my-secret with specified keys instead of names on disk\n\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n\n\t # Create a new secret named my-secret with key1=supersecret and key2=topsecret\n\t kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret\x00\n\t # Create a new service account named my-service-account\n\t kubectl create serviceaccount my-service-account\x00\n\t# Create a new ExternalName service named my-ns \n\tkubectl create service externalname my-ns --external-name bar.com\x00\n\tCreate an ExternalName service with the specified name.\n\n\tExternalName service references to an external DNS address instead of\n\tonly pods, which will allow application authors to reference services\n\tthat exist off platform, on other clusters, or locally.\x00\n\tHelp provides help for any command in the application.\n\tSimply type kubectl help [path to command] for full details.\x00\n # Create a new LoadBalancer service named my-lbs\n kubectl create service loadbalancer my-lbs --tcp=5678:8080\x00\n # Create a new clusterIP service named my-cs\n kubectl create service clusterip my-cs --tcp=5678:8080\n\n # Create a new clusterIP service named my-cs (in headless mode)\n kubectl create service clusterip my-cs --clusterip=\"None\"\x00\n # Create a new deployment named my-dep that runs the busybox image.\n kubectl create deployment my-dep --image=busybox\x00\n # Create a new nodeport service named my-ns\n kubectl create service nodeport my-ns --tcp=5678:8080\x00\n # Dump current cluster state to stdout\n kubectl cluster-info dump\n\n # Dump current cluster state to /path/to/cluster-state\n kubectl cluster-info dump --output-directory=/path/to/cluster-state\n\n # Dump all namespaces to stdout\n kubectl cluster-info dump --all-namespaces\n\n # Dump a set of namespaces to /path/to/cluster-state\n kubectl cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state\x00\n # Update pod 'foo' with the annotation 'description' and the value 'my frontend'.\n # If the same annotation is set multiple times, only the last value will be applied\n kubectl annotate pods foo description='my frontend'\n\n # Update a pod identified by type and name in \"pod.json\"\n kubectl annotate -f pod.json description='my frontend'\n\n # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value.\n kubectl annotate --overwrite pods foo description='my frontend 
running nginx'\n\n # Update all pods in the namespace\n kubectl annotate pods --all description='my frontend running nginx'\n\n # Update pod 'foo' only if the resource is unchanged from version 1.\n kubectl annotate pods foo description='my frontend running nginx' --resource-version=1\n\n # Update pod 'foo' by removing an annotation named 'description' if it exists.\n # Does not require the --overwrite flag.\n kubectl annotate pods foo description-\x00\n Create a LoadBalancer service with the specified name.\x00\n Create a clusterIP service with the specified name.\x00\n Create a deployment with the specified name.\x00\n Create a nodeport service with the specified name.\x00\n Dumps cluster info out suitable for debugging and diagnosing cluster problems. By default, dumps everything to\n stdout. You can optionally specify a directory with --output-directory. If you specify a directory, kubernetes will\n build a set of files in that directory. By default only dumps things in the 'kube-system' namespace, but you can\n switch to a different namespace with the --namespaces flag, or specify --all-namespaces to dump all namespaces.\n\n The command also dumps the logs of all of the pods in the cluster, these logs are dumped into different directories\n based on namespace and pod name.\x00\n Display addresses of the master and services with label kubernetes.io/cluster-service=true\n To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\x00A comma-delimited set of quota scopes that must all match each object tracked by the quota.\x00A comma-delimited set of resource=quantity pairs that define a hard limit.\x00A label selector to use for this budget. Only equality-based selector requirements are supported.\x00A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.)\x00A schedule in the Cron format the job should be run with.\x00Additional external IP address (not managed by Kubernetes) to accept for the service. If this IP is routed to a node, the service can be accessed by this IP in addition to its generated service IP.\x00An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.\x00An inline JSON override for the generated service object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field. Only used if --expose is true.\x00Apply a configuration to a resource by filename or stdin\x00Approve a certificate signing request\x00Assign your own ClusterIP or set to 'None' for a 'headless' service (no loadbalancing).\x00Attach to a running container\x00Auto-scale a Deployment, ReplicaSet, or ReplicationController\x00ClusterIP to be assigned to the service. Leave empty to auto-allocate, or set to 'None' to create a headless service.\x00ClusterRole this ClusterRoleBinding should reference\x00ClusterRole this RoleBinding should reference\x00Container name which will have its image upgraded. Only relevant when --image is specified, ignored otherwise. 
Required when using --image on a multi-container pod\x00Convert config files between different API versions\x00Copy files and directories to and from containers.\x00Create a ClusterRoleBinding for a particular ClusterRole\x00Create a LoadBalancer service.\x00Create a NodePort service.\x00Create a RoleBinding for a particular Role or ClusterRole\x00Create a TLS secret\x00Create a clusterIP service.\x00Create a configmap from a local file, directory or literal value\x00Create a deployment with the specified name.\x00Create a namespace with the specified name\x00Create a pod disruption budget with the specified name.\x00Create a quota with the specified name.\x00Create a resource by filename or stdin\x00Create a secret for use with a Docker registry\x00Create a secret from a local file, directory or literal value\x00Create a secret using specified subcommand\x00Create a service account with the specified name\x00Create a service using specified subcommand.\x00Create an ExternalName service.\x00Delete resources by filenames, stdin, resources and names, or by resources and label selector\x00Delete the specified cluster from the kubeconfig\x00Delete the specified context from the kubeconfig\x00Deny a certificate signing request\x00Deprecated: Gracefully shut down a resource by name or filename\x00Describe one or many contexts\x00Display Resource (CPU/Memory) usage of nodes\x00Display Resource (CPU/Memory) usage of pods\x00Display Resource (CPU/Memory) usage.\x00Display cluster info\x00Display clusters defined in the kubeconfig\x00Display merged kubeconfig settings or a specified kubeconfig file\x00Display one or many resources\x00Displays the current-context\x00Documentation of resources\x00Drain node in preparation for maintenance\x00Dump lots of relevant info for debugging and diagnosis\x00Edit a resource on the server\x00Email for Docker registry\x00Execute a command in a container\x00Explicit policy for when to pull container images. Required when --image is same as existing image, ignored otherwise.\x00Forward one or more local ports to a pod\x00Help about any command\x00IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).\x00If non-empty, set the session affinity for the service to this; legal values: 'None', 'ClientIP'\x00If non-empty, the annotation update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.\x00If non-empty, the labels update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.\x00Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with --filename/-f\x00Manage a deployment rollout\x00Mark node as schedulable\x00Mark node as unschedulable\x00Mark the provided resource as paused\x00Modify certificate resources.\x00Modify kubeconfig files\x00Name or number for the port on the container that the service should direct traffic to. Optional.\x00Only return logs after a specific date (RFC3339). Defaults to all logs. 
Only one of since-time / since may be used.\x00Output shell completion code for the specified shell (bash or zsh)\x00Output the formatted object with the given group version (for ex: 'extensions/v1beta1').)\x00Password for Docker registry authentication\x00Path to PEM encoded public key certificate.\x00Path to private key associated with given certificate.\x00Perform a rolling update of the given ReplicationController\x00Precondition for resource version. Requires that the current resource version match this value in order to scale.\x00Print the client and server version information\x00Print the list of flags inherited by all commands\x00Print the logs for a container in a pod\x00Replace a resource by filename or stdin\x00Resume a paused resource\x00Role this RoleBinding should reference\x00Run a particular image on the cluster\x00Run a proxy to the Kubernetes API server\x00Server location for Docker registry\x00Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job\x00Set specific features on objects\x00Set the last-applied-configuration annotation on a live object to match the contents of a file.\x00Set the selector on a resource\x00Sets a cluster entry in kubeconfig\x00Sets a context entry in kubeconfig\x00Sets a user entry in kubeconfig\x00Sets an individual value in a kubeconfig file\x00Sets the current-context in a kubeconfig file\x00Show details of a specific resource or group of resources\x00Show the status of the rollout\x00Synonym for --target-port\x00Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service\x00The image for the container to run.\x00The image pull policy for the container. If left empty, this value will not be specified by the client and defaulted by the server\x00The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise\x00The minimum number or percentage of available pods this budget requires.\x00The name for the newly created object.\x00The name for the newly created object. If not specified, the name of the input resource will be used.\x00The name of the API generator to use, see http://kubernetes.io/docs/user-guide/kubectl-conventions/#generators for a list.\x00The name of the API generator to use. Currently there is only 1 generator.\x00The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.\x00The name of the generator to use for creating a service. Only used if --expose is true\x00The network protocol for the service to be created. Default is 'TCP'.\x00The port that the service should serve on. Copied from the resource being exposed, if unspecified\x00The port that this container exposes. If --expose is true, this is also the port used by the service that is created.\x00The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'. Note that server side components may assign limits depending on the server configuration, such as limit ranges.\x00The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.\x00The restart policy for this Pod. Legal values [Always, OnFailure, Never]. 
If set to 'Always' a deployment is created, if set to 'OnFailure' a job is created, if set to 'Never', a regular pod is created. For the latter two --replicas must be 1. Default 'Always', for CronJobs `Never`.\x00The type of secret to create\x00Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP'.\x00Undo a previous rollout\x00Unsets an individual value in a kubeconfig file\x00Update field(s) of a resource using strategic merge patch\x00Update image of a pod template\x00Update resource requests/limits on objects with pod templates\x00Update the annotations on a resource\x00Update the labels on a resource\x00Update the taints on one or more nodes\x00Username for Docker registry authentication\x00View latest last-applied-configuration annotations of a resource/object\x00View rollout history\x00Where to output the files. If empty or '-' uses stdout, otherwise creates a directory hierarchy in that directory\x00dummy restart flag)\x00external name of service\x00kubectl controls the Kubernetes cluster manager\x00Project-Id-Version: gettext-go-examples-hello\nReport-Msgid-Bugs-To: EMAIL\nPOT-Creation-Date: 2017-03-14 21:32-0700\nPO-Revision-Date: 2019-02-14 10:33+0900\nLast-Translator: Giri Kuncoro \nLanguage-Team: \nLanguage: ja\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 2.1.1\nX-Poedit-SourceCharset: UTF-8\nPlural-Forms: nplurals=2; plural=(n > 1);\n\x00\n\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the cluster-admin ClusterRole\n\t\t kubectl create clusterrolebinding cluster-admin \u2014clusterrole=cluster-admin \u2014user=user1 \u2014user=user2 \u2014group=group1\x00\n\t\t # Create a RoleBinding for user1, user2, and group1 using the admin ClusterRole\n\t\t kubectl create rolebinding admin \u2014clusterrole=admin \u2014user=user1 \u2014user=user2 \u2014group=group1\x00\n\t\t # Create a new configmap named my-config based on folder bar\n\t\t kubectl create configmap my-config \u2014from-file=path/to/bar\n\n\t\t # Create a new configmap named my-config with specified keys instead of file basenames on disk\n\t\t kubectl create configmap my-config \u2014from-file=key1=/path/to/bar/file1.txt \u2014from-file=key2=/path/to/bar/file2.txt\n\n\t\t # Create a new configmap named my-config with key1=config1 and key2=config2\n\t\t kubectl create configmap my-config \u2014from-literal=key1=config1 \u2014from-literal=key2=config2\x00\n\t\t # If you don\u2019t already have a .dockercfg file, you can create a dockercfg secret directly by using:\n\t\t kubectl create secret docker-registry my-secret \u2014docker-server=DOCKER_REGISTRY_SERVER \u2014docker-username=DOCKER_USER \u2014docker-password=DOCKER_PASSWORD \u2014docker-email=DOCKER_EMAIL\x00\n\t\t # Show metrics for all nodes\n\t\t kubectl top node\n\n\t\t # Show metrics for a given node\n\t\t kubectl top node NODE_NAME\x00\n\t\t# Apply the configuration in pod.json to a pod.\n\t\tkubectl apply -f ./pod.json\n\n\t\t# Apply the JSON passed into stdin to a pod.\n\t\tcat pod.json | kubectl apply -f -\n\n\t\t# Note: \u2014prune is still in Alpha\n\t\t# Apply the configuration in manifest.yaml that matches label app=nginx and delete all the other resources that are not in the file and match label app=nginx.\n\t\tkubectl apply \u2014prune -f manifest.yaml -l app=nginx\n\n\t\t# Apply the configuration in manifest.yaml and delete all the other configmaps that are not in the file.\n\t\tkubectl apply \u2014prune -f manifest.yaml \u2014all 
\u2014prune-whitelist=core/v1/ConfigMap\x00\n\t\t# Auto scale a deployment \u201cfoo\u201d, with the number of pods between 2 and 10, no target CPU utilization specified so a default autoscaling policy will be used:\n\t\tkubectl autoscale deployment foo \u2014min=2 \u2014max=10\n\n\t\t# Auto scale a replication controller \u201cfoo\u201d, with the number of pods between 1 and 5, target CPU utilization at 80%:\n\t\tkubectl autoscale rc foo \u2014max=5 \u2014cpu-percent=80\x00\n\t\t# Convert \u2018pod.yaml\u2019 to latest version and print to stdout.\n\t\tkubectl convert -f pod.yaml\n\n\t\t# Convert the live state of the resource specified by \u2018pod.yaml\u2019 to the latest version\n\t\t# and print to stdout in json format.\n\t\tkubectl convert -f pod.yaml \u2014local -o json\n\n\t\t# Convert all files under current directory to latest version and create them all.\n\t\tkubectl convert -f . | kubectl create -f -\x00\n\t\t# Create a ClusterRole named \u201cpod-reader\u201d that allows user to perform \u201cget\u201d, \u201cwatch\u201d and \u201clist\u201d on pods\n\t\tkubectl create clusterrole pod-reader \u2014verb=get,list,watch \u2014resource=pods\n\n\t\t# Create a ClusterRole named \u201cpod-reader\u201d with ResourceName specified\n\t\tkubectl create clusterrole pod-reader \u2014verb=get,list,watch \u2014resource=pods \u2014resource-name=readablepod\x00\n\t\t# Create a Role named \u201cpod-reader\u201d that allows user to perform \u201cget\u201d, \u201cwatch\u201d and \u201clist\u201d on pods\n\t\tkubectl create role pod-reader \u2014verb=get \u2014verb=list \u2014verb=watch \u2014resource=pods\n\n\t\t# Create a Role named \u201cpod-reader\u201d with ResourceName specified\n\t\tkubectl create role pod-reader \u2014verb=get \u2014verg=list \u2014verb=watch \u2014resource=pods \u2014resource-name=readablepod\x00\n\t\t# Create a new resourcequota named my-quota\n\t\tkubectl create quota my-quota \u2014hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10\n\n\t\t# Create a new resourcequota named best-effort\n\t\tkubectl create quota best-effort \u2014hard=pods=100 \u2014scopes=BestEffort\x00\n\t\t# Create a pod disruption budget named my-pdb that will select all pods with the app=rails label\n\t\t# and require at least one of them being available at any point in time.\n\t\tkubectl create poddisruptionbudget my-pdb \u2014selector=app=rails \u2014min-available=1\n\n\t\t# Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label\n\t\t# and require at least half of the pods selected to be available at any point in time.\n\t\tkubectl create pdb my-pdb \u2014selector=app=nginx \u2014min-available=50%\x00\n\t\t# Create a pod using the data in pod.json.\n\t\tkubectl create -f ./pod.json\n\n\t\t# Create a pod based on the JSON passed into stdin.\n\t\tcat pod.json | kubectl create -f -\n\n\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API format then create the resource using the edited data.\n\t\tkubectl create -f docker-registry.yaml \u2014edit \u2014output-version=v1 -o json\x00\n\t\t# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose rc nginx --port=80 --target-port=8000\n\n\t\t# Create a service for a replication controller identified by type and name specified in \"nginx-controller.yaml\", which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose -f 
nginx-controller.yaml --port=80 --target-port=8000\n\n\t\t# Create a service for a pod valid-pod, which serves on port 444 with the name \"frontend\"\n\t\tkubectl expose pod valid-pod \u2014port=444 \u2014name=frontend\n\n\t\t# Create a second service based on the above service, exposing the container port 8443 as port 443 with the name \u201cnginx-https\u201d\n\t\tkubectl expose service nginx \u2014port=443 \u2014target-port=8443 \u2014name=nginx-https\n\n\t\t# Create a service for a replicated streaming application on port 4100 balancing UDP traffic and named \u2018video-stream\u2019.\n\t\tkubectl expose rc streamer \u2014port=4100 \u2014protocol=udp \u2014name=video-stream\n\n\t\t# Create a service for a replicated nginx using replica set, which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose rs nginx \u2014port=80 \u2014target-port=8000\n\n\t\t# Create a service for an nginx deployment, which serves on port 80 and connects to the containers on port 8000.\n\t\tkubectl expose deployment nginx \u2014port=80 \u2014target-port=8000\x00\n\t\t# Delete a pod using the type and name specified in pod.json.\n\t\tkubectl delete -f ./pod.json\n\n\t\t# Delete a pod based on the type and name in the JSON passed into stdin.\n\t\tcat pod.json | kubectl delete -f -\n\n\t\t# Delete pods and services with same names \u201cbaz\u201d and \u201cfoo\u201d\n\t\tkubectl delete pod,service baz foo\n\n\t\t# Delete pods and services with label name=myLabel.\n\t\tkubectl delete pods,services -l name=myLabel\n\n\t\t# Delete a pod with minimal delay\n\t\tkubectl delete pod foo \u2014now\n\n\t\t# Force delete a pod on a dead node\n\t\tkubectl delete pod foo \u2014grace-period=0 \u2014force\n\n\t\t# Delete all pods\n\t\tkubectl delete pods \u2014all\x00\n\t\t# Describe a node\n\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n\n\t\t# Describe a pod\n\t\tkubectl describe pods/nginx\n\n\t\t# Describe a pod identified by type and name in \u201cpod.json\u201d\n\t\tkubectl describe -f pod.json\n\n\t\t# Describe all pods\n\t\tkubectl describe pods\n\n\t\t# Describe pods by label name=myLabel\n\t\tkubectl describe po -l name=myLabel\n\n\t\t# Describe all pods managed by the \u2018frontend\u2019 replication controller (rc-created pods\n\t\t# get the name of the rc as a prefix in the pod the name).\n\t\tkubectl describe pods frontend\x00\n\t\t# Drain node \u201cfoo\u201d, even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n\t\t$ kubectl drain foo \u2014force\n\n\t\t# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use a grace period of 15 minutes.\n\t\t$ kubectl drain foo \u2014grace-period=900\x00\n\t\t# Edit the service named \u2018docker-registry\u2019:\n\t\tkubectl edit svc/docker-registry\n\n\t\t# Use an alternative editor\n\t\tKUBE_EDITOR=\u201cnano\u201d kubectl edit svc/docker-registry\n\n\t\t# Edit the job \u2018myjob\u2019 in JSON using the v1 API format:\n\t\tkubectl edit job.v1.batch/myjob -o json\n\n\t\t# Edit the deployment \u2018mydeployment\u2019 in YAML and save the modified config in its annotation:\n\t\tkubectl edit deployment/mydeployment -o yaml \u2014save-config\x00\n\t\t# Get output from running \u2018date\u2019 from pod 123456-7890, using the first container by default\n\t\tkubectl exec 123456-7890 date\n\n\t\t# Get output from running \u2018date\u2019 in ruby-container from pod 123456-7890\n\t\tkubectl exec 
123456-7890 -c ruby-container date\n\n\t\t# Switch to raw terminal mode, sends stdin to \u2018bash\u2019 in ruby-container from pod 123456-7890\n\t\t# and sends stdout/stderr from \u2018bash\u2019 back to the client\n\t\tkubectl exec 123456-7890 -c ruby-container -i -t \u2014 bash -il\x00\n\t\t# Get output from running pod 123456-7890, using the first container by default\n\t\tkubectl attach 123456-7890\n\n\t\t# Get output from ruby-container from pod 123456-7890\n\t\tkubectl attach 123456-7890 -c ruby-container\n\n\t\t# Switch to raw terminal mode, sends stdin to \u2018bash\u2019 in ruby-container from pod 123456-7890\n\t\t# and sends stdout/stderr from \u2018bash\u2019 back to the client\n\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n\n\t\t# Get output from the first pod of a ReplicaSet named nginx\n\t\tkubectl attach rs/nginx\n\t\t\x00\n\t\t# Get the documentation of the resource and its fields\n\t\tkubectl explain pods\n\n\t\t# Get the documentation of a specific field of a resource\n\t\tkubectl explain pods.spec.containers\x00\n\t\t# Install bash completion on a Mac using homebrew\n\t\tbrew install bash-completion\n\t\tprintf \"\n# Bash completion support\nsource $(brew --prefix)/etc/bash_completion\n\" >> $HOME/.bash_profile\n\t\tsource $HOME/.bash_profile\n\n\t\t# Load the kubectl completion code for bash into the current shell\n\t\tsource <(kubectl completion bash)\n\n\t\t# Write bash completion code to a file and source if from .bash_profile\n\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n\t\tprintf \u201c\n# Kubectl shell completion\nsource \u2018$HOME/.kube/completion.bash.inc\u2019\n\u201c >> $HOME/.bash_profile\n\t\tsource $HOME/.bash_profile\n\n\t\t# Load the kubectl completion code for zsh[1] into the current shell\n\t\tsource <(kubectl completion zsh)\x00\n\t\t# List all pods in ps output format.\n\t\tkubectl get pods\n\n\t\t# List all pods in ps output format with more information (such as node name).\n\t\tkubectl get pods -o wide\n\n\t\t# List a single replication controller with specified NAME in ps output format.\n\t\tkubectl get replicationcontroller web\n\n\t\t# List a single pod in JSON output format.\n\t\tkubectl get -o json pod web-pod-13je7\n\n\t\t# List a pod identified by type and name specified in \u201cpod.yaml\u201d in JSON output format.\n\t\tkubectl get -f pod.yaml -o json\n\n\t\t# Return only the phase value of the specified pod.\n\t\tkubectl get -o template pod/web-pod-13je7 \u2014template={{.status.phase}}\n\n\t\t# List all replication controllers and services together in ps output format.\n\t\tkubectl get rc,services\n\n\t\t# List one or more resources by their type and names.\n\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n\n\t\t# List all resources with different types.\n\t\tkubectl get all\x00\n\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod\n\t\tkubectl port-forward mypod 5000 6000\n\n\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n\t\tkubectl port-forward mypod 8888:5000\n\n\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n\t\tkubectl port-forward mypod :5000\n\n\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n\t\tkubectl port-forward mypod 0:5000\x00\n\t\t# Mark node \u201cfoo\u201d as schedulable.\n\t\t$ kubectl uncordon foo\x00\n\t\t# Mark node \u201cfoo\u201d as unschedulable.\n\t\tkubectl cordon foo\x00\n\t\t# Partially update a node using strategic merge patch\n\t\tkubectl patch node k8s-node-1 -p 
\u2018{\u201cspec\u201d:{\u201cunschedulable\u201d:true}}\u2019\n\n\t\t# Partially update a node identified by the type and name specified in \u201cnode.json\u201d using strategic merge patch\n\t\tkubectl patch -f node.json -p \u2018{\u201cspec\u201d:{\u201cunschedulable\u201d:true}}\u2019\n\n\t\t# Update a container\u2019s image; spec.containers[*].name is required because it\u2019s a merge key\n\t\tkubectl patch pod valid-pod -p \u2018{\u201cspec\u201d:{\u201ccontainers\u201d:[{\u201cname\u201d:\u201dkubernetes-serve-hostname\u201d,\u201dimage\u201d:\u201dnew image\u201d}]}}\u2019\n\n\t\t# Update a container\u2019s image using a json patch with positional arrays\n\t\tkubectl patch pod valid-pod \u2014type=\u2018json\u2019 -p=\u2018[{\u201cop\u201d: \u201creplace\u201d, \u201cpath\u201d: \u201c/spec/containers/0/image\u201d, \u201cvalue\u201d:\u201dnew image\u201d}]\u2019\x00\n\t\t# Print flags inherited by all commands\n\t\tkubectl options\x00\n\t\t# Print the address of the master and cluster services\n\t\tkubectl cluster-info\x00\n\t\t# Print the client and server versions for the current context\n\t\tkubectl version\x00\n\t\t# Print the supported API versions\n\t\tkubectl api-versions\x00\n\t\t# Replace a pod using the data in pod.json.\n\t\tkubectl replace -f ./pod.json\n\n\t\t# Replace a pod based on the JSON passed into stdin.\n\t\tcat pod.json | kubectl replace -f -\n\n\t\t# Update a single-container pod\u2019s image version (tag) to v4\n\t\tkubectl get pod mypod -o yaml | sed \u2019s/\\(image: myimage\\):.*$/\x01:v4/\u2018 | kubectl replace -f -\n\n\t\t# Force replace, delete and then re-create the resource\n\t\tkubectl replace \u2014force -f ./pod.json\x00\n\t\t# Return snapshot logs from pod nginx with only one container\n\t\tkubectl logs nginx\n\n\t\t# Return snapshot logs for the pods defined by label app=nginx\n\t\tkubectl logs -lapp=nginx\n\n\t\t# Return snapshot of previous terminated ruby container logs from pod web-1\n\t\tkubectl logs -p -c ruby web-1\n\n\t\t# Begin streaming the logs of the ruby container in pod web-1\n\t\tkubectl logs -f -c ruby web-1\n\n\t\t# Display only the most recent 20 lines of output in pod nginx\n\t\tkubectl logs \u2014tail=20 nginx\n\n\t\t# Show all logs from pod nginx written in the last hour\n\t\tkubectl logs \u2014since=1h nginx\n\n\t\t# Return snapshot logs from first container of a job named hello\n\t\tkubectl logs job/hello\n\n\t\t# Return snapshot logs from container nginx-1 of a deployment named nginx\n\t\tkubectl logs deployment/nginx -c nginx-1\x00\n\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static content from ./local/www/\n\t\tkubectl proxy \u2014port=8011 \u2014www=./local/www/\n\n\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n\t\t# The chosen port for the server will be output to stdout.\n\t\tkubectl proxy \u2014port=0\n\n\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api\n\t\t# This makes e.g. 
the pods api available at localhost:8001/k8s-api/v1/pods/\n\t\tkubectl proxy \u2014api-prefix=/k8s-api\x00\n\t\t# Scale a replicaset named \u2018foo\u2019 to 3.\n\t\tkubectl scale \u2014replicas=3 rs/foo\n\n\t\t# Scale a resource identified by type and name specified in \u201cfoo.yaml\u201d to 3.\n\t\tkubectl scale \u2014replicas=3 -f foo.yaml\n\n\t\t# If the deployment named mysql\u2019s current size is 2, scale mysql to 3.\n\t\tkubectl scale \u2014current-replicas=2 \u2014replicas=3 deployment/mysql\n\n\t\t# Scale multiple replication controllers.\n\t\tkubectl scale \u2014replicas=5 rc/foo rc/bar rc/baz\n\n\t\t# Scale job named \u2018cron\u2019 to 3.\n\t\tkubectl scale \u2014replicas=3 job/cron\x00\n\t\t# Set the last-applied-configuration of a resource to match the contents of a file.\n\t\tkubectl apply set-last-applied -f deploy.yaml\n\n\t\t# Execute set-last-applied against each configuration file in a directory.\n\t\tkubectl apply set-last-applied -f path/\n\n\t\t# Set the last-applied-configuration of a resource to match the contents of a file, will create the annotation if it does not already exist.\n\t\tkubectl apply set-last-applied -f deploy.yaml \u2014create-annotation=true\n\t\t\x00\n\t\t# Show metrics for all pods in the default namespace\n\t\tkubectl top pod\n\n\t\t# Show metrics for all pods in the given namespace\n\t\tkubectl top pod \u2014namespace=NAMESPACE\n\n\t\t# Show metrics for a given pod and its containers\n\t\tkubectl top pod POD_NAME \u2014containers\n\n\t\t# Show metrics for the pods defined by label name=myLabel\n\t\tkubectl top pod -l name=myLabel\x00\n\t\t# Shut down foo.\n\t\tkubectl stop replicationcontroller foo\n\n\t\t# Stop pods and services with label name=myLabel.\n\t\tkubectl stop pods,services -l name=myLabel\n\n\t\t# Shut down the service defined in service.json\n\t\tkubectl stop -f service.json\n\n\t\t# Shut down all resources in the path/to/resources directory\n\t\tkubectl stop -f path/to/resources\x00\n\t\t# Start a single instance of nginx.\n\t\tkubectl run nginx --image=nginx\n\n\t\t# Start a single instance of hazelcast and let the container expose port 5701 .\n\t\tkubectl run hazelcast --image=hazelcast --port=5701\n\n\t\t# Start a single instance of hazelcast and set environment variables \"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n\n\t\t# Start a replicated instance of nginx.\n\t\tkubectl run nginx --image=nginx --replicas=5\n\n\t\t# Dry run. Print the corresponding API objects without creating them.\n\t\tkubectl run nginx --image=nginx --dry-run\n\n\t\t# Start a single instance of nginx, but overload the spec of the deployment with a partial set of values parsed from JSON.\n\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", \"spec\": { ... } }'\n\n\t\t# Start a pod of busybox and keep it in the foreground, don't restart it if it exits.\n\t\tkubectl run -i -t busybox \u2014image=busybox \u2014restart=Never\n\n\t\t# Start the nginx container using the default command, but use custom arguments (arg1 .. 
argN) for that command.\n\t\tkubectl run nginx \u2014image=nginx \u2014 \u2026 \n\n\t\t# Start the nginx container using a different command and custom arguments.\n\t\tkubectl run nginx \u2014image=nginx \u2014command \u2014 \u2026 \n\n\t\t# Start the perl container to compute \u03c0 to 2000 places and print it out.\n\t\tkubectl run pi \u2014image=perl \u2014restart=OnFailure \u2014 perl -Mbignum=bpi -wle \u2018print bpi(2000)\u2019\n\n\t\t# Start the cron job to compute \u03c0 to 2000 places and print it out every 5 minutes.\n\t\tkubectl run pi \u2014schedule=\u201c0/5 * * * ?\u201d \u2014image=perl \u2014restart=OnFailure \u2014 perl -Mbignum=bpi -wle \u2018print bpi(2000)\u2019\x00\n\t\t# Update node \u2018foo\u2019 with a taint with key \u2018dedicated\u2019 and value \u2018special-user\u2019 and effect \u2018NoSchedule\u2019.\n\t\t# If a taint with that key and effect already exists, its value is replaced as specified.\n\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n\n\t\t# Remove from node \u2018foo\u2019 the taint with key \u2018dedicated\u2019 and effect \u2018NoSchedule\u2019 if one exists.\n\t\tkubectl taint nodes foo dedicated:NoSchedule-\n\n\t\t# Remove from node \u2018foo\u2019 all the taints with key \u2018dedicated\u2019\n\t\tkubectl taint nodes foo dedicated-\x00\n\t\t# Update pod \u2018foo\u2019 with the label \u2018unhealthy\u2019 and the value \u2018true\u2019.\n\t\tkubectl label pods foo unhealthy=true\n\n\t\t# Update pod \u2018foo\u2019 with the label \u2018status\u2019 and the value \u2018unhealthy\u2019, overwriting any existing value.\n\t\tkubectl label \u2014overwrite pods foo status=unhealthy\n\n\t\t# Update all pods in the namespace\n\t\tkubectl label pods \u2014all status=unhealthy\n\n\t\t# Update a pod identified by the type and name in \u201cpod.json\u201d\n\t\tkubectl label -f pod.json status=unhealthy\n\n\t\t# Update pod \u2018foo\u2019 only if the resource is unchanged from version 1.\n\t\tkubectl label pods foo status=unhealthy \u2014resource-version=1\n\n\t\t# Update pod \u2018foo\u2019 by removing a label named \u2018bar\u2019 if it exists.\n\t\t# Does not require the \u2014overwrite flag.\n\t\tkubectl label pods foo bar-\x00\n\t\t# Update pods of frontend-v1 using new replication controller data in frontend-v2.json.\n\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n\n\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n\n\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the image, and switching the\n\t\t# name of the replication controller.\n\t\tkubectl rolling-update frontend-v1 frontend-v2 \u2014image=image:v2\n\n\t\t# Update the pods of frontend by just changing the image, and keeping the old name.\n\t\tkubectl rolling-update frontend \u2014image=image:v2\n\n\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 to frontend-v2).\n\t\tkubectl rolling-update frontend-v1 frontend-v2 \u2014rollback\x00\n\t\t# View the last-applied-configuration annotations by type/name in YAML.\n\t\tkubectl apply view-last-applied deployment/nginx\n\n\t\t# View the last-applied-configuration annotations by file in JSON\n\t\tkubectl apply view-last-applied -f deploy.yaml -o json\x00\n\t\tApply a configuration to a resource by filename or stdin.\n\t\tThis resource will be created if it doesn\u2019t exist yet.\n\t\tTo use \u2018apply\u2019, always create the resource initially with either \u2018apply\u2019 or \u2018create 
\u2014save-config\u2019.\n\n\t\tJSON and YAML formats are accepted.\n\n\t\tAlpha Disclaimer: the \u2014prune functionality is not yet complete. Do not use unless you are aware of what the current state is. See https://issues.k8s.io/34274.\x00\n\t\tConvert config files between different API versions. Both YAML\n\t\tand JSON formats are accepted.\n\n\t\tThe command takes filename, directory, or URL as input, and convert it into format\n\t\tof version specified by \u2014output-version flag. If target version is not specified or\n\t\tnot supported, convert to latest version.\n\n\t\tThe default output will be printed to stdout in YAML format. One can use -o option\n\t\tto change to output destination.\x00\n\t\tCreate a ClusterRole.\x00\n\t\tCreate a ClusterRoleBinding for a particular ClusterRole.\x00\n\t\tCreate a RoleBinding for a particular Role or ClusterRole.\x00\n\t\tCreate a TLS secret from the given public/private key pair.\n\n\t\tThe public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.\x00\n\t\tCreate a configmap based on a file, directory, or specified literal value.\n\n\t\tA single configmap may package one or more key/value pairs.\n\n\t\tWhen creating a configmap based on a file, the key will default to the basename of the file, and the value will\n\t\tdefault to the file content. If the basename is an invalid key, you may specify an alternate key.\n\n\t\tWhen creating a configmap based on a directory, each file whose basename is a valid key in the directory will be\n\t\tpackaged into the configmap. Any directory entries except regular files are ignored (e.g. subdirectories,\n\t\tsymlinks, devices, pipes, etc).\x00\n\t\tCreate a namespace with the specified name.\x00\n\t\tCreate a new secret for use with Docker registries.\n\n\t\tDockercfg secrets are used to authenticate against Docker registries.\n\n\t\tWhen using the Docker command line to push images, you can authenticate to a given registry by running\n\n\t\t $ docker login DOCKER_REGISTRY_SERVER \u2014username=DOCKER_USER \u2014password=DOCKER_PASSWORD \u2014email=DOCKER_EMAIL\u2019.\n\n That produces a ~/.dockercfg file that is used by subsequent \u2018docker push\u2019 and \u2018docker pull\u2019 commands to\n\t\tauthenticate to the registry. The email address is optional.\n\n\t\tWhen creating applications, you may have a Docker registry that requires authentication. In order for the\n\t\tnodes to pull images on your behalf, they have to have the credentials. You can provide this information\n\t\tby creating a dockercfg secret and attaching it to your service account.\x00\n\t\tCreate a pod disruption budget with the specified name, selector, and desired minimum available pods\x00\n\t\tCreate a resource by filename or stdin.\n\n\t\tJSON and YAML formats are accepted.\x00\n\t\tCreate a resourcequota with the specified name, hard limits and optional scopes\x00\n\t\tCreate a role with single rule.\x00\n\t\tCreate a secret based on a file, directory, or specified literal value.\n\n\t\tA single secret may package one or more key/value pairs.\n\n\t\tWhen creating a secret based on a file, the key will default to the basename of the file, and the value will\n\t\tdefault to the file content. If the basename is an invalid key, you may specify an alternate key.\n\n\t\tWhen creating a secret based on a directory, each file whose basename is a valid key in the directory will be\n\t\tpackaged into the secret. 
Any directory entries except regular files are ignored (e.g. subdirectories,\n\t\tsymlinks, devices, pipes, etc).\x00\n\t\tCreate a service account with the specified name.\x00\n\t\tCreate and run a particular image, possibly replicated.\n\n\t\tCreates a deployment or job to manage the created container(s).\x00\n\t\tCreates an autoscaler that automatically chooses and sets the number of pods that run in a kubernetes cluster.\n\n\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name and creates an autoscaler that uses the given resource as a reference.\n\t\tAn autoscaler can automatically increase or decrease number of pods deployed within the system as needed.\x00\n\t\tDelete resources by filenames, stdin, resources and names, or by resources and label selector.\n\n\t\tJSON and YAML formats are accepted. Only one type of the arguments may be specified: filenames,\n\t\tresources and names, or resources and label selector.\n\n\t\tSome resources, such as pods, support graceful deletion. These resources define a default period\n\t\tbefore they are forcibly terminated (the grace period) but you may override that value with\n\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. Because these resources often\n\t\trepresent entities in the cluster, deletion may not be acknowledged immediately. If the node\n\t\thosting a pod is down or cannot reach the API server, termination may take significantly longer\n\t\tthan the grace period. To force delete a resource,\tyou must pass a grace\tperiod of 0 and specify\n\t\tthe --force flag.\n\n\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the pod\u2019s processes have been\n\t\tterminated, which can leave those processes running until the node detects the deletion and\n\t\tcompletes graceful deletion. If your processes use shared storage or talk to a remote API and\n\t\tdepend on the name of the pod to identify themselves, force deleting those pods may result in\n\t\tmultiple processes running on different machines using the same identification which may lead\n\t\tto data corruption or inconsistency. 
Only force delete pods when you are sure the pod is\n\t\tterminated, or if your application can tolerate multiple copies of the same pod running at once.\n\t\tAlso, if you force delete pods the scheduler may place new pods on those nodes before the node\n\t\thas released those resources and causing those pods to be evicted immediately.\n\n\t\tNote that the delete command does NOT do resource version checks, so if someone\n\t\tsubmits an update to a resource right when you submit a delete, their update\n\t\twill be lost along with the rest of the resource.\x00\n\t\tDeprecated: Gracefully shut down a resource by name or filename.\n\n\t\tThe stop command is deprecated, all its functionalities are covered by delete command.\n\t\tSee \u2018kubectl delete \u2014help\u2019 for more details.\n\n\t\tAttempts to shut down and delete a resource that supports graceful termination.\n\t\tIf the resource is scalable it will be scaled to 0 before deletion.\x00\n\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n\n\t\tThe top-node command allows you to see the resource consumption of nodes.\x00\n\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n\n\t\tThe \u2018top pod\u2019 command allows you to see the resource consumption of pods.\n\n\t\tDue to the metrics pipeline delay, they may be unavailable for a few minutes\n\t\tsince pod creation.\x00\n\t\tDisplay Resource (CPU/Memory/Storage) usage.\n\n\t\tThe top command allows you to see the resource consumption for nodes or pods.\n\n\t\tThis command requires Heapster to be correctly configured and working on the server. \x00\n\t\tDrain node in preparation for maintenance.\n\n\t\tThe given node will be marked unschedulable to prevent new pods from arriving.\n\t\t'drain' evicts the pods if the APIServer supports eviction\n\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use normal DELETE\n\t\tto delete the pods.\n\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot be deleted through\n\t\tthe API server). If there are DaemonSet-managed pods, drain will not proceed\n\t\twithout --ignore-daemonsets, and regardless it will not delete any\n\t\tDaemonSet-managed pods, because those pods would be immediately replaced by the\n\t\tDaemonSet controller, which ignores unschedulable markings. If there are any\n\t\tpods that are neither mirror pods nor managed by ReplicationController,\n\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete any pods unless you\n\t\tuse \u2014force. \u2014force will also allow deletion to proceed if the managing resource of one\n\t\tor more pods is missing.\n\n\t\t\u2019drain\u2019 waits for graceful termination. You should not operate on the machine until\n\t\tthe command completes.\n\n\t\tWhen you are ready to put the node back into service, use kubectl uncordon, which\n\t\twill make the node schedulable again.\n\n\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)\x00\n\t\tEdit a resource from the default editor.\n\n\t\tThe edit command allows you to directly edit any API resource you can retrieve via the\n\t\tcommand line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR\n\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for Windows.\n\t\tYou can edit multiple objects, although changes are applied one at a time. 
The command\n\t\taccepts filenames as well as command line arguments, although the files you point to must\n\t\tbe previously saved versions of resources.\n\n\t\tEditing is done with the API version used to fetch the resource.\n\t\tTo edit using a specific API version, fully-qualify the resource, version, and group.\n\n\t\tThe default format is YAML. To edit in JSON, specify \u201c-o json\u201d.\n\n\t\tThe flag \u2014windows-line-endings can be used to force Windows line endings,\n\t\totherwise the default for your operating system will be used.\n\n\t\tIn the event an error occurs while updating, a temporary file will be created on disk\n\t\tthat contains your unapplied changes. The most common error when updating a resource\n\t\tis another editor changing the resource on the server. When this occurs, you will have\n\t\tto apply your changes to the newer version of the resource, or update your temporary\n\t\tsaved copy to include the latest resource version.\x00\n\t\tMark node as schedulable.\x00\n\t\tMark node as unschedulable.\x00\n\t\tOutput shell completion code for the specified shell (bash or zsh).\n\t\tThe shell code must be evaluated to provide interactive\n\t\tcompletion of kubectl commands. This can be done by sourcing it from\n\t\tthe .bash_profile.\n\n\t\tNote: this requires the bash-completion framework, which is not installed\n\t\tby default on Mac. This can be installed by using homebrew:\n\n\t\t $ brew install bash-completion\n\n\t\tOnce installed, bash_completion must be evaluated. This can be done by adding the\n\t\tfollowing line to the .bash_profile\n\n\t\t $ source $(brew \u2014prefix)/etc/bash_completion\n\n\t\tNote for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2\x00\n\t\tPerform a rolling update of the given ReplicationController.\n\n\t\tReplaces the specified replication controller with a new replication controller by updating one pod at a time to use the\n\t\tnew PodTemplate. The new-controller.json must specify the same namespace as the\n\t\texisting replication controller and overwrite at least one (common) label in its replicaSelector.\n\n\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)\x00\n\t\tReplace a resource by filename or stdin.\n\n\t\tJSON and YAML formats are accepted. If replacing an existing resource, the\n\t\tcomplete resource spec must be provided. 
This can be obtained by\n\n\t\t $ kubectl get TYPE NAME -o yaml\n\n\t\tPlease refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html to find if a field is mutable.\x00\n\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, or Job.\n\n\t\tScale also allows users to specify one or more preconditions for the scale action.\n\n\t\tIf \u2014current-replicas or \u2014resource-version is specified, it is validated before the\n\t\tscale is attempted, and it is guaranteed that the precondition holds true when the\n\t\tscale is sent to the server.\x00\n\t\tSet the latest last-applied-configuration annotations by setting it to match the contents of a file.\n\t\tThis results in the last-applied-configuration being updated as though \u2018kubectl apply -f \u2019 was run,\n\t\twithout updating any other parts of the object.\x00\n\t\tTo proxy all of the kubernetes api and nothing else, use:\n\n\t\t $ kubectl proxy \u2014api-prefix=/\n\n\t\tTo proxy only part of the kubernetes api and also some static files:\n\n\t\t $ kubectl proxy \u2014www=/my/files \u2014www-prefix=/static/ \u2014api-prefix=/api/\n\n\t\tThe above lets you \u2018curl localhost:8001/api/v1/pods\u2019.\n\n\t\tTo proxy the entire kubernetes api at a different root, use:\n\n\t\t $ kubectl proxy \u2014api-prefix=/custom/\n\n\t\tThe above lets you \u2018curl localhost:8001/custom/api/v1/pods\u2019\x00\n\t\tUpdate field(s) of a resource using strategic merge patch\n\n\t\tJSON and YAML formats are accepted.\n\n\t\tPlease refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html to find if a field is mutable.\x00\n\t\tUpdate the labels on a resource.\n\n\t\t* A label must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[1]d characters.\n\t\t* If \u2014overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error.\n\t\t* If \u2014resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.\x00\n\t\tUpdate the taints on one or more nodes.\n\n\t\t* A taint consists of a key, value, and effect. As an argument here, it is expressed as key=value:effect.\n\t\t* The key must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[1]d characters.\n\t\t* The value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[2]d characters.\n\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n\t\t* Currently taint can only apply to node.\x00\n\t\tView the latest last-applied-configuration annotations by type/name or file.\n\n\t\tThe default output will be printed to stdout in YAML format. One can use -o option\n\t\tto change output format.\x00\n\t # !!!Important Note!!!\n\t # Requires that the \u2018tar\u2019 binary is present in your container\n\t # image. 
If \u2018tar\u2019 is not present, \u2018kubectl cp\u2019 will fail.\n\n\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace\n\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n\n # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container\n\t\tkubectl cp /tmp/foo :/tmp/bar -c \n\n\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace \n\t\tkubectl cp /tmp/foo /:/tmp/bar\n\n\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n\t\tkubectl cp /:/tmp/foo /tmp/bar\x00\n\t # Create a new TLS secret named tls-secret with the given key pair:\n\t kubectl create secret tls tls-secret \u2014cert=path/to/tls.cert \u2014key=path/to/tls.key\x00\n\t # Create a new namespace named my-namespace\n\t kubectl create namespace my-namespace\x00\n\t # Create a new secret named my-secret with keys for each file in folder bar\n\t kubectl create secret generic my-secret \u2014from-file=path/to/bar\n\n\t # Create a new secret named my-secret with specified keys instead of names on disk\n\t kubectl create secret generic my-secret \u2014from-file=ssh-privatekey=~/.ssh/id_rsa \u2014from-file=ssh-publickey=~/.ssh/id_rsa.pub\n\n\t # Create a new secret named my-secret with key1=supersecret and key2=topsecret\n\t kubectl create secret generic my-secret \u2014from-literal=key1=supersecret \u2014from-literal=key2=topsecret\x00\n\t # Create a new service account named my-service-account\n\t kubectl create serviceaccount my-service-account\x00\n\t# Create a new ExternalName service named my-ns \n\tkubectl create service externalname my-ns \u2014external-name bar.com\x00\n\tCreate an ExternalName service with the specified name.\n\n\tExternalName service references to an external DNS address instead of\n\tonly pods, which will allow application authors to reference services\n\tthat exist off platform, on other clusters, or locally.\x00\n\tHelp provides help for any command in the application.\n\tSimply type kubectl help [path to command] for full details.\x00\n # Create a new LoadBalancer service named my-lbs\n kubectl create service loadbalancer my-lbs \u2014tcp=5678:8080\x00\n # Create a new clusterIP service named my-cs\n kubectl create service clusterip my-cs \u2014tcp=5678:8080\n\n # Create a new clusterIP service named my-cs (in headless mode)\n kubectl create service clusterip my-cs \u2014clusterip=\u201cNone\u201d\x00\n # Create a new deployment named my-dep that runs the busybox image.\n kubectl create deployment my-dep \u2014image=busybox\x00\n # Create a new nodeport service named my-ns\n kubectl create service nodeport my-ns \u2014tcp=5678:8080\x00\n # Dump current cluster state to stdout\n kubectl cluster-info dump\n\n # Dump current cluster state to /path/to/cluster-state\n kubectl cluster-info dump \u2014output-directory=/path/to/cluster-state\n\n # Dump all namespaces to stdout\n kubectl cluster-info dump \u2014all-namespaces\n\n # Dump a set of namespaces to /path/to/cluster-state\n kubectl cluster-info dump \u2014namespaces default,kube-system \u2014output-directory=/path/to/cluster-state\x00\n # Update pod 'foo' with the annotation 'description' and the value 'my frontend'.\n # If the same annotation is set multiple times, only the last value will be applied\n kubectl annotate pods foo description='my frontend'\n\n # Update a pod identified by type and name in \"pod.json\"\n kubectl annotate -f pod.json description=\u2018my frontend\u2019\n\n # Update pod \u2018foo\u2019 with the annotation \u2018description\u2019 and the value 
\u2018my frontend running nginx\u2019, overwriting any existing value.\n kubectl annotate \u2014overwrite pods foo description=\u2018my frontend running nginx\u2019\n\n # Update all pods in the namespace\n kubectl annotate pods \u2014all description=\u2018my frontend running nginx\u2019\n\n # Update pod \u2018foo\u2019 only if the resource is unchanged from version 1.\n kubectl annotate pods foo description=\u2018my frontend running nginx\u2019 \u2014resource-version=1\n\n # Update pod \u2018foo\u2019 by removing an annotation named \u2018description\u2019 if it exists.\n # Does not require the \u2014overwrite flag.\n kubectl annotate pods foo description-\x00\n Create a LoadBalancer service with the specified name.\x00\n Create a clusterIP service with the specified name.\x00\n Create a deployment with the specified name.\x00\n Create a nodeport service with the specified name.\x00\n Dumps cluster info out suitable for debugging and diagnosing cluster problems. By default, dumps everything to\n stdout. You can optionally specify a directory with \u2014output-directory. If you specify a directory, kubernetes will\n build a set of files in that directory. By default only dumps things in the \u2018kube-system\u2019 namespace, but you can\n switch to a different namespace with the \u2014namespaces flag, or specify \u2014all-namespaces to dump all namespaces.\n\n The command also dumps the logs of all of the pods in the cluster, these logs are dumped into different directories\n based on namespace and pod name.\x00\n Display addresses of the master and services with label kubernetes.io/cluster-service=true\n To further debug and diagnose cluster problems, use \u2018kubectl cluster-info dump\u2019.\x00A comma-delimited set of quota scopes that must all match each object tracked by the quota.\x00A comma-delimited set of resource=quantity pairs that define a hard limit.\x00A label selector to use for this budget. Only equality-based selector requirements are supported.\x00A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.)\x00A schedule in the Cron format the job should be run with.\x00Additional external IP address (not managed by Kubernetes) to accept for the service. If this IP is routed to a node, the service can be accessed by this IP in addition to its generated service IP.\x00An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.\x00An inline JSON override for the generated service object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field. Only used if \u2014expose is true.\x00\u30d5\u30a1\u30a4\u30eb\u540d\u3092\u6307\u5b9a\u307e\u305f\u306f\u6a19\u6e96\u5165\u529b\u7d4c\u7531\u3067\u30ea\u30bd\u30fc\u30b9\u306b\u30b3\u30f3\u30d5\u30a3\u30b0\u3092\u9069\u7528\u3059\u308b\x00Approve a certificate signing request\x00Assign your own ClusterIP or set to \u2018None\u2019 for a \u2018headless\u2019 service (no loadbalancing).\x00Attach to a running container\x00Auto-scale a Deployment, ReplicaSet, or ReplicationController\x00ClusterIP to be assigned to the service. 
Leave empty to auto-allocate, or set to \u2018None\u2019 to create a headless service.\x00ClusterRole this ClusterRoleBinding should reference\x00ClusterRole this RoleBinding should reference\x00Container name which will have its image upgraded. Only relevant when \u2014image is specified, ignored otherwise. Required when using \u2014image on a multi-container pod\x00Convert config files between different API versions\x00Copy files and directories to and from containers.\x00Create a ClusterRoleBinding for a particular ClusterRole\x00Create a LoadBalancer service.\x00Create a NodePort service.\x00Create a RoleBinding for a particular Role or ClusterRole\x00Create a TLS secret\x00Create a clusterIP service.\x00Create a configmap from a local file, directory or literal value\x00Create a deployment with the specified name.\x00Create a namespace with the specified name\x00Create a pod disruption budget with the specified name.\x00Create a quota with the specified name.\x00\u30d5\u30a1\u30a4\u30eb\u540d\u3092\u6307\u5b9a\u307e\u305f\u306f\u6a19\u6e96\u5165\u529b\u7d4c\u7531\u3067\u30ea\u30bd\u30fc\u30b9\u3092\u4f5c\u6210\u3059\u308b\x00Create a secret for use with a Docker registry\x00Create a secret from a local file, directory or literal value\x00Create a secret using specified subcommand\x00Create a service account with the specified name\x00Create a service using specified subcommand.\x00Create an ExternalName service.\x00Delete resources by filenames, stdin, resources and names, or by resources and label selector\x00kubeconfig\u304b\u3089\u6307\u5b9a\u3057\u305f\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u524a\u9664\u3059\u308b\x00kubeconfig\u304b\u3089\u6307\u5b9a\u3057\u305f\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u524a\u9664\u3059\u308b\x00Deny a certificate signing request\x00Deprecated: Gracefully shut down a resource by name or filename\x001\u3064\u307e\u305f\u306f\u8907\u6570\u306e\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u8a18\u8ff0\u3059\u308b\x00Display Resource (CPU/Memory) usage of nodes\x00Display Resource (CPU/Memory) usage of pods\x00Display Resource (CPU/Memory) usage.\x00\u30af\u30e9\u30b9\u30bf\u30fc\u306e\u60c5\u5831\u3092\u8868\u793a\u3059\u308b\x00kubeconfig\u3067\u5b9a\u7fa9\u3055\u308c\u305f\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u8868\u793a\u3059\u308b\x00\u30de\u30fc\u30b8\u3055\u308c\u305fkubeconfig\u306e\u8a2d\u5b9a\u307e\u305f\u306f\u6307\u5b9a\u3055\u308c\u305fkubeconfig\u30d5\u30a1\u30a4\u30eb\u3092\u8868\u793a\u3059\u308b\x001\u3064\u307e\u305f\u306f\u8907\u6570\u306e\u30ea\u30bd\u30fc\u30b9\u3092\u8868\u793a\u3059\u308b\x00\u30ab\u30ec\u30f3\u30c8\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u8868\u793a\u3059\u308b\x00\u30ea\u30bd\u30fc\u30b9\u306e\u8aac\u660e\u3092\u8868\u793a\u3059\u308b\x00Drain node in preparation for maintenance\x00Dump lots of relevant info for debugging and diagnosis\x00Edit a resource on the server\x00Email for Docker registry\x00Execute a command in a container\x00Explicit policy for when to pull container images. Required when \u2014image is same as existing image, ignored otherwise.\x00Forward one or more local ports to a pod\x00Help about any command\x00IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).\x00If non-empty, set the session affinity for the service to this; legal values: \u2018None\u2019, \u2018ClientIP\u2019\x00If non-empty, the annotation update will only succeed if this is the current resource-version for the object. 
Only valid when specifying a single resource.\x00If non-empty, the labels update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.\x00Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with \u2014filename/-f\x00Manage a deployment rollout\x00Mark node as schedulable\x00Mark node as unschedulable\x00Mark the provided resource as paused\x00Modify certificate resources.\x00kubeconfig\u30d5\u30a1\u30a4\u30eb\u3092\u5909\u66f4\u3059\u308b\x00Name or number for the port on the container that the service should direct traffic to. Optional.\x00Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.\x00Output shell completion code for the specified shell (bash or zsh)\x00Output the formatted object with the given group version (for ex: \u2018extensions/v1beta1\u2019).)\x00Password for Docker registry authentication\x00Path to PEM encoded public key certificate.\x00Path to private key associated with given certificate.\x00Perform a rolling update of the given ReplicationController\x00Precondition for resource version. Requires that the current resource version match this value in order to scale.\x00Print the client and server version information\x00Print the list of flags inherited by all commands\x00Print the logs for a container in a pod\x00Replace a resource by filename or stdin\x00Resume a paused resource\x00Role this RoleBinding should reference\x00Run a particular image on the cluster\x00Run a proxy to the Kubernetes API server\x00Server location for Docker registry\x00Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job\x00Set specific features on objects\x00Set the last-applied-configuration annotation on a live object to match the contents of a file.\x00\u30ea\u30bd\u30fc\u30b9\u306e\u30bb\u30ec\u30af\u30bf\u30fc\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u306b\u30af\u30e9\u30b9\u30bf\u30fc\u30a8\u30f3\u30c8\u30ea\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u306b\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u30a8\u30f3\u30c8\u30ea\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u306b\u30e6\u30fc\u30b6\u30fc\u30a8\u30f3\u30c8\u30ea\u3092\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u30d5\u30a1\u30a4\u30eb\u5185\u306e\u5909\u6570\u3092\u500b\u5225\u306b\u8a2d\u5b9a\u3059\u308b\x00kubeconfig\u306b\u30ab\u30ec\u30f3\u30c8\u30b3\u30f3\u30c6\u30ad\u30b9\u30c8\u3092\u8a2d\u5b9a\u3059\u308b\x00Show details of a specific resource or group of resources\x00Show the status of the rollout\x00Synonym for \u2014target-port\x00Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service\x00The image for the container to run.\x00The image pull policy for the container. If left empty, this value will not be specified by the client and defaulted by the server\x00The key to use to differentiate between two different controllers, default \u2018deployment\u2019. Only relevant when \u2014image is specified, ignored otherwise\x00The minimum number or percentage of available pods this budget requires.\x00The name for the newly created object.\x00The name for the newly created object. If not specified, the name of the input resource will be used.\x00The name of the API generator to use, see http://kubernetes.io/docs/user-guide/kubectl-conventions/#generators for a list.\x00The name of the API generator to use. 
Currently there is only 1 generator.\x00The name of the API generator to use. There are 2 generators: \u2018service/v1\u2019 and \u2018service/v2\u2019. The only difference between them is that service port in v1 is named \u2018default\u2019, while it is left unnamed in v2. Default is \u2018service/v2\u2019.\x00The name of the generator to use for creating a service. Only used if \u2014expose is true\x00The network protocol for the service to be created. Default is \u2018TCP\u2019.\x00The port that the service should serve on. Copied from the resource being exposed, if unspecified\x00The port that this container exposes. If \u2014expose is true, this is also the port used by the service that is created.\x00The resource requirement limits for this container. For example, \u2018cpu=200m,memory=512Mi\u2019. Note that server side components may assign limits depending on the server configuration, such as limit ranges.\x00The resource requirement requests for this container. For example, \u2018cpu=100m,memory=256Mi\u2019. Note that server side components may assign requests depending on the server configuration, such as limit ranges.\x00The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to \u2018Always\u2019 a deployment is created, if set to \u2018OnFailure\u2019 a job is created, if set to \u2018Never\u2019, a regular pod is created. For the latter two \u2014replicas must be 1. Default \u2018Always\u2019, for CronJobs `Never`.\x00The type of secret to create\x00Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is \u2018ClusterIP\u2019.\x00\u73fe\u5728\u306e\u30ed\u30fc\u30eb\u30a2\u30a6\u30c8\u3092\u53d6\u308a\u6d88\u3059\x00kubeconfig\u30d5\u30a1\u30a4\u30eb\u304b\u3089\u5909\u6570\u3092\u500b\u5225\u306b\u524a\u9664\u3059\u308b\x00Update field(s) of a resource using strategic merge patch\x00Update image of a pod template\x00Update resource requests/limits on objects with pod templates\x00\u30ea\u30bd\u30fc\u30b9\u306e\u30a2\u30ce\u30c6\u30fc\u30b7\u30e7\u30f3\u3092\u66f4\u65b0\u3059\u308b\x00\u30ea\u30bd\u30fc\u30b9\u306e\u30e9\u30d9\u30eb\u3092\u66f4\u65b0\u3059\u308b\x00Update the taints on one or more nodes\x00Username for Docker registry authentication\x00View latest last-applied-configuration annotations of a resource/object\x00\u30ed\u30fc\u30eb\u30a2\u30a6\u30c8\u306e\u5c65\u6b74\u3092\u8868\u793a\u3059\u308b\x00Where to output the files. 
If empty or \u2018-\u2018 uses stdout, otherwise creates a directory hierarchy in that directory\x00dummy restart flag)\x00external name of service\x00kubectl controls the Kubernetes cluster manager\x00")
func translationsKubectlJa_jpLc_messagesK8sMoBytes() ([]byte, error) {
	return _translationsKubectlJa_jpLc_messagesK8sMo, nil
@@ -13927,90 +13927,3311 @@ var _translationsKubectlJa_jpLc_messagesK8sPo = []byte(`# Test translations for
 msgid ""
 msgstr ""
 "Project-Id-Version: gettext-go-examples-hello\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2013-12-12 20:03+0000\n"
-"PO-Revision-Date: 2017-01-29 22:54-0800\n"
+"Report-Msgid-Bugs-To: EMAIL\n"
+"POT-Creation-Date: 2017-03-14 21:32-0700\n"
+"PO-Revision-Date: 2019-02-14 10:33+0900\n"
 "Last-Translator: Giri Kuncoro \n"
+"Language-Team: \n"
+"Language: ja\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"X-Generator: Poedit 1.6.10\n"
+"X-Generator: Poedit 2.1.1\n"
 "X-Poedit-SourceCharset: UTF-8\n"
-"Language-Team: \n"
 "Plural-Forms: nplurals=2; plural=(n > 1);\n"
-"Language: ja\n"
+
+#: pkg/kubectl/cmd/create_clusterrolebinding.go:35
+msgid ""
+"\n"
+"\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the "
+"cluster-admin ClusterRole\n"
+"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-"
+"admin --user=user1 --user=user2 --group=group1"
+msgstr ""
+"\n"
+"\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the "
+"cluster-admin ClusterRole\n"
+"\t\t kubectl create clusterrolebinding cluster-admin —clusterrole=cluster-"
+"admin —user=user1 —user=user2 —group=group1"
+
+#: pkg/kubectl/cmd/create_rolebinding.go:35
+msgid ""
+"\n"
+"\t\t # Create a RoleBinding for user1, user2, and group1 using the admin "
+"ClusterRole\n"
+"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+"user=user2 --group=group1"
+msgstr ""
+"\n"
+"\t\t # Create a RoleBinding for user1, user2, and group1 using the admin "
+"ClusterRole\n"
+"\t\t kubectl create rolebinding admin —clusterrole=admin —user=user1 —"
+"user=user2 —group=group1"
+
+#: pkg/kubectl/cmd/create_configmap.go:44
+msgid ""
+"\n"
+"\t\t # Create a new configmap named my-config based on folder bar\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new configmap named my-config with specified keys instead "
+"of file basenames on disk\n"
+"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1."
+"txt --from-file=key2=/path/to/bar/file2.txt\n"
+"\n"
+"\t\t # Create a new configmap named my-config with key1=config1 and "
+"key2=config2\n"
+"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-"
+"literal=key2=config2"
+msgstr ""
+"\n"
+"\t\t # Create a new configmap named my-config based on folder bar\n"
+"\t\t kubectl create configmap my-config —from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new configmap named my-config with specified keys instead "
+"of file basenames on disk\n"
+"\t\t kubectl create configmap my-config —from-file=key1=/path/to/bar/file1."
+"txt —from-file=key2=/path/to/bar/file2.txt\n"
+"\n"
+"\t\t # Create a new configmap named my-config with key1=config1 and "
+"key2=config2\n"
+"\t\t kubectl create configmap my-config —from-literal=key1=config1 —from-"
+"literal=key2=config2"
+
+#: pkg/kubectl/cmd/create_secret.go:135
+msgid ""
+"\n"
+"\t\t # If you don't already have a .dockercfg file, you can create a "
+"dockercfg secret directly by using:\n"
+"\t\t kubectl create secret docker-registry my-secret --docker-"
+"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL"
+msgstr ""
+"\n"
+"\t\t # If you don’t already have a .dockercfg file, you can create a "
+"dockercfg secret directly by using:\n"
+"\t\t kubectl create secret docker-registry my-secret —docker-"
+"server=DOCKER_REGISTRY_SERVER —docker-username=DOCKER_USER —docker-"
+"password=DOCKER_PASSWORD —docker-email=DOCKER_EMAIL"
+
+#: pkg/kubectl/cmd/top_node.go:65
+msgid ""
+"\n"
+"\t\t # Show metrics for all nodes\n"
+"\t\t kubectl top node\n"
+"\n"
+"\t\t # Show metrics for a given node\n"
+"\t\t kubectl top node NODE_NAME"
+msgstr ""
+"\n"
+"\t\t # Show metrics for all nodes\n"
+"\t\t kubectl top node\n"
+"\n"
+"\t\t # Show metrics for a given node\n"
+"\t\t kubectl top node NODE_NAME"
+
+#: pkg/kubectl/cmd/apply.go:84
+msgid ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod.\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply the JSON passed into stdin to a pod.\n"
+"\t\tcat pod.json | kubectl apply -f -\n"
+"\n"
+"\t\t# Note: --prune is still in Alpha\n"
+"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx "
+"and delete all the other resources that are not in the file and match label "
+"app=nginx.\n"
+"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+"\n"
+"\t\t# Apply the configuration in manifest.yaml and delete all the other "
+"configmaps that are not in the file.\n"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"ConfigMap"
+msgstr ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod.\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply the JSON passed into stdin to a pod.\n"
+"\t\tcat pod.json | kubectl apply -f -\n"
+"\n"
+"\t\t# Note: —prune is still in Alpha\n"
+"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx "
+"and delete all the other resources that are not in the file and match label "
+"app=nginx.\n"
+"\t\tkubectl apply —prune -f manifest.yaml -l app=nginx\n"
+"\n"
+"\t\t# Apply the configuration in manifest.yaml and delete all the other "
+"configmaps that are not in the file.\n"
+"\t\tkubectl apply —prune -f manifest.yaml —all —prune-whitelist=core/v1/"
+"ConfigMap"
+
+#: pkg/kubectl/cmd/autoscale.go:40
+#, c-format
+msgid ""
+"\n"
+"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and "
+"10, no target CPU utilization specified so a default autoscaling policy will "
+"be used:\n"
+"\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+"\n"
+"\t\t# Auto scale a replication controller \"foo\", with the number of pods "
+"between 1 and 5, target CPU utilization at 80%:\n"
+"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+msgstr ""
+"\n"
+"\t\t# Auto scale a deployment “foo”, with the number of pods between 2 and "
+"10, no target CPU utilization specified so a default autoscaling policy will "
+"be used:\n"
+"\t\tkubectl autoscale deployment foo —min=2 —max=10\n"
+"\n"
+"\t\t# Auto scale a replication controller “foo”, with the number of pods "
+"between 1 and 5, target CPU utilization at 80%:\n"
+"\t\tkubectl autoscale rc foo —max=5 —cpu-percent=80"
+
+#: pkg/kubectl/cmd/convert.go:49
+msgid ""
+"\n"
+"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n"
+"\t\tkubectl convert -f pod.yaml\n"
+"\n"
+"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the "
+"latest version\n"
+"\t\t# and print to stdout in json format.\n"
+"\t\tkubectl convert -f pod.yaml --local -o json\n"
+"\n"
+"\t\t# Convert all files under current directory to latest version and create "
+"them all.\n"
+"\t\tkubectl convert -f . | kubectl create -f -"
+msgstr ""
+"\n"
+"\t\t# Convert ‘pod.yaml’ to latest version and print to stdout.\n"
+"\t\tkubectl convert -f pod.yaml\n"
+"\n"
+"\t\t# Convert the live state of the resource specified by ‘pod.yaml’ to the "
+"latest version\n"
+"\t\t# and print to stdout in json format.\n"
+"\t\tkubectl convert -f pod.yaml —local -o json\n"
+"\n"
+"\t\t# Convert all files under current directory to latest version and create "
+"them all.\n"
+"\t\tkubectl convert -f . | kubectl create -f -"
+
+#: pkg/kubectl/cmd/create_clusterrole.go:34
+msgid ""
+"\n"
+"\t\t# Create a ClusterRole named \"pod-reader\" that allows user to perform "
+"\"get\", \"watch\" and \"list\" on pods\n"
+"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+"resource=pods\n"
+"\n"
+"\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName specified\n"
+"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+"resource=pods --resource-name=readablepod"
+msgstr ""
+"\n"
+"\t\t# Create a ClusterRole named “pod-reader” that allows user to perform "
+"“get”, “watch” and “list” on pods\n"
+"\t\tkubectl create clusterrole pod-reader —verb=get,list,watch —"
+"resource=pods\n"
+"\n"
+"\t\t# Create a ClusterRole named “pod-reader” with ResourceName specified\n"
+"\t\tkubectl create clusterrole pod-reader —verb=get,list,watch —"
+"resource=pods —resource-name=readablepod"
+
+#: pkg/kubectl/cmd/create_role.go:41
+msgid ""
+"\n"
+"\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get"
+"\", \"watch\" and \"list\" on pods\n"
+"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --"
+"resource=pods\n"
+"\n"
+"\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n"
+"\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --"
+"resource=pods --resource-name=readablepod"
+msgstr ""
+"\n"
+"\t\t# Create a Role named “pod-reader” that allows user to perform “get”, "
+"“watch” and “list” on pods\n"
+"\t\tkubectl create role pod-reader —verb=get —verb=list —verb=watch —"
+"resource=pods\n"
+"\n"
+"\t\t# Create a Role named “pod-reader” with ResourceName specified\n"
+"\t\tkubectl create role pod-reader —verb=get —verg=list —verb=watch —"
+"resource=pods —resource-name=readablepod"
+
+#: pkg/kubectl/cmd/create_quota.go:35
+msgid ""
+"\n"
+"\t\t# Create a new resourcequota named my-quota\n"
+"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,"
+"replicationcontrollers=2,resourcequotas=1,secrets=5,"
+"persistentvolumeclaims=10\n"
+"\n"
+"\t\t# Create a new resourcequota named best-effort\n"
+"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort"
+msgstr ""
+"\n"
+"\t\t# Create a new resourcequota named my-quota\n"
+"\t\tkubectl create quota my-quota —hard=cpu=1,memory=1G,pods=2,services=3,"
+"replicationcontrollers=2,resourcequotas=1,secrets=5,"
+"persistentvolumeclaims=10\n"
+"\n"
+"\t\t# Create a new resourcequota named best-effort\n"
+"\t\tkubectl create quota best-effort —hard=pods=100 —scopes=BestEffort"
+
+#: pkg/kubectl/cmd/create_pdb.go:35
+#, c-format
+msgid ""
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=rails label\n"
+"\t\t# and require at least one of them being available at any point in "
+"time.\n"
+"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-"
+"available=1\n"
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=nginx label\n"
+"\t\t# and require at least half of the pods selected to be available at any "
+"point in time.\n"
+"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%"
+msgstr ""
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=rails label\n"
+"\t\t# and require at least one of them being available at any point in "
+"time.\n"
+"\t\tkubectl create poddisruptionbudget my-pdb —selector=app=rails —min-"
+"available=1\n"
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=nginx label\n"
+"\t\t# and require at least half of the pods selected to be available at any "
+"point in time.\n"
+"\t\tkubectl create pdb my-pdb —selector=app=nginx —min-available=50%"
+
+#: pkg/kubectl/cmd/create.go:47
+msgid ""
+"\n"
+"\t\t# Create a pod using the data in pod.json.\n"
+"\t\tkubectl create -f ./pod.json\n"
+"\n"
+"\t\t# Create a pod based on the JSON passed into stdin.\n"
+"\t\tcat pod.json | kubectl create -f -\n"
+"\n"
+"\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API format "
+"then create the resource using the edited data.\n"
+"\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o json"
+msgstr ""
+"\n"
+"\t\t# Create a pod using the data in pod.json.\n"
+"\t\tkubectl create -f ./pod.json\n"
+"\n"
+"\t\t# Create a pod based on the JSON passed into stdin.\n"
+"\t\tcat pod.json | kubectl create -f -\n"
+"\n"
+"\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API format "
+"then create the resource using the edited data.\n"
+"\t\tkubectl create -f docker-registry.yaml —edit —output-version=v1 -o json"
+
+#: pkg/kubectl/cmd/expose.go:53
+msgid ""
+"\n"
+"\t\t# Create a service for a replicated nginx, which serves on port 80 and "
+"connects to the containers on port 8000.\n"
+"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n"
+"\n"
+"\t\t# Create a service for a replication controller identified by type and "
+"name specified in \"nginx-controller.yaml\", which serves on port 80 and "
+"connects to the containers on port 8000.\n"
+"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n"
+"\n"
+"\t\t# Create a service for a pod valid-pod, which serves on port 444 with "
+"the name \"frontend\"\n"
+"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n"
+"\n"
+"\t\t# Create a second service based on the above service, exposing the "
+"container port 8443 as port 443 with the name \"nginx-https\"\n"
+"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-"
+"https\n"
+"\n"
+"\t\t# Create a service for a replicated streaming application on port 4100 "
+"balancing UDP traffic and named 'video-stream'.\n"
+"\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-"
+"stream\n"
+"\n"
+"\t\t# Create a service for a replicated nginx using replica set, 
which " +"serves on port 80 and connects to the containers on port 8000.\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +msgstr "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod —port=444 —name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name “nginx-https”\n" +"\t\tkubectl expose service nginx —port=443 —target-port=8443 —name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named ‘video-stream’.\n" +"\t\tkubectl expose rc streamer —port=4100 —protocol=udp —name=video-stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000.\n" +"\t\tkubectl expose rs nginx —port=80 —target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose deployment nginx —port=80 —target-port=8000" + +#: pkg/kubectl/cmd/delete.go:68 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json.\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into " +"stdin.\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel.\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --grace-period=0 --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json.\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into " +"stdin.\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names “baz” and “foo”\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel.\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo —now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo —grace-period=0 —force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods —all" + +#: pkg/kubectl/cmd/describe.go:54 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes 
kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name).\n" +"\t\tkubectl describe pods frontend" +msgstr "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in “pod.json”\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the ‘frontend’ replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name).\n" +"\t\tkubectl describe pods frontend" + +#: pkg/kubectl/cmd/drain.go:165 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +"\t\t$ kubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use a " +"grace period of 15 minutes.\n" +"\t\t$ kubectl drain foo --grace-period=900" +msgstr "" +"\n" +"\t\t# Drain node “foo”, even if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +"\t\t$ kubectl drain foo —force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use a " +"grace period of 15 minutes.\n" +"\t\t$ kubectl drain foo —grace-period=900" + +#: pkg/kubectl/cmd/edit.go:80 +msgid "" +"\n" +"\t\t# Edit the service named 'docker-registry':\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation:\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +msgstr "" +"\n" +"\t\t# Edit the service named ‘docker-registry’:\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=“nano” kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job ‘myjob’ in JSON using the v1 API format:\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment ‘mydeployment’ in YAML and save the modified " +"config in its annotation:\n" +"\t\tkubectl edit deployment/mydeployment -o yaml —save-config" + +#: pkg/kubectl/cmd/exec.go:41 +msgid "" +"\n" +"\t\t# Get output from running 'date' from pod 123456-7890, using the first " +"container by default\n" +"\t\tkubectl exec 123456-7890 date\n" +"\n" +"\t\t# Get output from running 'date' in ruby-container from pod 123456-7890\n" +"\t\tkubectl exec 123456-7890 -c 
ruby-container date\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +msgstr "" +"\n" +"\t\t# Get output from running ‘date’ from pod 123456-7890, using the first " +"container by default\n" +"\t\tkubectl exec 123456-7890 date\n" +"\n" +"\t\t# Get output from running ‘date’ in ruby-container from pod 123456-7890\n" +"\t\tkubectl exec 123456-7890 -c ruby-container date\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to ‘bash’ in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from ‘bash’ back to the client\n" +"\t\tkubectl exec 123456-7890 -c ruby-container -i -t — bash -il" + +#: pkg/kubectl/cmd/attach.go:42 +msgid "" +"\n" +"\t\t# Get output from running pod 123456-7890, using the first container by " +"default\n" +"\t\tkubectl attach 123456-7890\n" +"\n" +"\t\t# Get output from ruby-container from pod 123456-7890\n" +"\t\tkubectl attach 123456-7890 -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running pod 123456-7890, using the first container by " +"default\n" +"\t\tkubectl attach 123456-7890\n" +"\n" +"\t\t# Get output from ruby-container from pod 123456-7890\n" +"\t\tkubectl attach 123456-7890 -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to ‘bash’ in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from ‘bash’ back to the client\n" +"\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" + +#: pkg/kubectl/cmd/explain.go:39 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: pkg/kubectl/cmd/completion.go:65 +msgid "" +"\n" +"\t\t# Install bash completion on a Mac using homebrew\n" +"\t\tbrew install bash-completion\n" +"\t\tprintf \"\n" +"# Bash completion support\n" +"source $(brew --prefix)/etc/bash_completion\n" +"\" >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for bash into the current shell\n" +"\t\tsource <(kubectl completion bash)\n" +"\n" +"\t\t# Write bash completion code to a file and source if from .bash_profile\n" +"\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\tprintf \"\n" +"# Kubectl shell completion\n" +"source '$HOME/.kube/completion.bash.inc'\n" +"\" >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\tsource <(kubectl completion zsh)" +msgstr "" +"\n" +"\t\t# Install bash completion on a Mac using homebrew\n" +"\t\tbrew 
install bash-completion\n" +"\t\tprintf \"\n" +"# Bash completion support\n" +"source $(brew --prefix)/etc/bash_completion\n" +"\" >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for bash into the current shell\n" +"\t\tsource <(kubectl completion bash)\n" +"\n" +"\t\t# Write bash completion code to a file and source if from .bash_profile\n" +"\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\tprintf “\n" +"# Kubectl shell completion\n" +"source ‘$HOME/.kube/completion.bash.inc’\n" +"“ >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\tsource <(kubectl completion zsh)" + +#: pkg/kubectl/cmd/get.go:64 +msgid "" +"\n" +"\t\t# List all pods in ps output format.\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name).\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format.\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List a single pod in JSON output format.\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format.\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# Return only the phase value of the specified pod.\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format.\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names.\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +"\n" +"\t\t# List all resources with different types.\n" +"\t\tkubectl get all" +msgstr "" +"\n" +"\t\t# List all pods in ps output format.\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name).\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format.\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List a single pod in JSON output format.\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in “pod.yaml” in JSON " +"output format.\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# Return only the phase value of the specified pod.\n" +"\t\tkubectl get -o template pod/web-pod-13je7 —template={{.status.phase}}\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format.\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names.\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +"\n" +"\t\t# List all resources with different types.\n" +"\t\tkubectl get all" + +#: pkg/kubectl/cmd/portforward.go:53 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward mypod 5000 6000\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod 8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod :5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" 
+"\t\tkubectl port-forward mypod 0:5000" +msgstr "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward mypod 5000 6000\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod 8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod :5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod 0:5000" + +#: pkg/kubectl/cmd/drain.go:118 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable.\n" +"\t\t$ kubectl uncordon foo" +msgstr "" +"\n" +"\t\t# Mark node “foo” as schedulable.\n" +"\t\t$ kubectl uncordon foo" + +#: pkg/kubectl/cmd/drain.go:93 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable.\n" +"\t\tkubectl cordon foo" +msgstr "" +"\n" +"\t\t# Mark node “foo” as unschedulable.\n" +"\t\tkubectl cordon foo" + +#: pkg/kubectl/cmd/patch.go:66 +msgid "" +"\n" +"\t\t# Partially update a node using strategic merge patch\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a json patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +msgstr "" +"\n" +"\t\t# Partially update a node using strategic merge patch\n" +"\t\tkubectl patch node k8s-node-1 -p ‘{“spec”:{“unschedulable”:true}}’\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"“node.json” using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p ‘{“spec”:{“unschedulable”:true}}’\n" +"\n" +"\t\t# Update a container’s image; spec.containers[*].name is required " +"because it’s a merge key\n" +"\t\tkubectl patch pod valid-pod -p ‘{“spec”:{“containers”:" +"[{“name”:”kubernetes-serve-hostname”,”image”:”new image”}]}}’\n" +"\n" +"\t\t# Update a container’s image using a json patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod —type=‘json’ -p=‘[{“op”: “replace”, “path”: " +"“/spec/containers/0/image”, “value”:”new image”}]’" + +#: pkg/kubectl/cmd/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +#: pkg/kubectl/cmd/clusterinfo.go:41 +msgid "" +"\n" +"\t\t# Print the address of the master and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" +"\n" +"\t\t# Print the address of the master and cluster services\n" +"\t\tkubectl cluster-info" + +#: pkg/kubectl/cmd/version.go:32 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" + +#: pkg/kubectl/cmd/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl 
api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: pkg/kubectl/cmd/replace.go:50 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json.\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin.\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" +"\n" +"\t\t# Replace a pod using the data in pod.json.\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin.\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod’s image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed ’s/\\(image: myimage\\):.*$/:v4/‘ | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace —force -f ./pod.json" + +#: pkg/kubectl/cmd/logs.go:40 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +"\t\tkubectl logs -lapp=nginx\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +"\t\tkubectl logs -lapp=nginx\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs —tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs —since=1h nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" + +#: pkg/kubectl/cmd/proxy.go:53 +msgid "" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +"\t\t# The chosen port for the server will be output to stdout.\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to kubernetes 
apiserver, changing the api prefix to k8s-" +"api\n" +"\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" +msgstr "" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy —port=8011 —www=./local/www/\n" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +"\t\t# The chosen port for the server will be output to stdout.\n" +"\t\tkubectl proxy —port=0\n" +"\n" +"\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +"api\n" +"\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy —api-prefix=/k8s-api" + +#: pkg/kubectl/cmd/scale.go:43 +msgid "" +"\n" +"\t\t# Scale a replicaset named 'foo' to 3.\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3.\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3.\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers.\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale job named 'cron' to 3.\n" +"\t\tkubectl scale --replicas=3 job/cron" +msgstr "" +"\n" +"\t\t# Scale a replicaset named ‘foo’ to 3.\n" +"\t\tkubectl scale —replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in “foo.yaml” " +"to 3.\n" +"\t\tkubectl scale —replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql’s current size is 2, scale mysql to 3.\n" +"\t\tkubectl scale —current-replicas=2 —replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers.\n" +"\t\tkubectl scale —replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale job named ‘cron’ to 3.\n" +"\t\tkubectl scale —replicas=3 job/cron" + +#: pkg/kubectl/cmd/apply_set_last_applied.go:67 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory.\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file, will create the annotation if it does not already exist.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory.\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file, will create the annotation if it does not already exist.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml —create-annotation=true\n" +"\t\t" + +#: pkg/kubectl/cmd/top_pod.go:61 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME 
--containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod —namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME —containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/stop.go:40 +msgid "" +"\n" +"\t\t# Shut down foo.\n" +"\t\tkubectl stop replicationcontroller foo\n" +"\n" +"\t\t# Stop pods and services with label name=myLabel.\n" +"\t\tkubectl stop pods,services -l name=myLabel\n" +"\n" +"\t\t# Shut down the service defined in service.json\n" +"\t\tkubectl stop -f service.json\n" +"\n" +"\t\t# Shut down all resources in the path/to/resources directory\n" +"\t\tkubectl stop -f path/to/resources" +msgstr "" +"\n" +"\t\t# Shut down foo.\n" +"\t\tkubectl stop replicationcontroller foo\n" +"\n" +"\t\t# Stop pods and services with label name=myLabel.\n" +"\t\tkubectl stop pods,services -l name=myLabel\n" +"\n" +"\t\t# Shut down the service defined in service.json\n" +"\t\tkubectl stop -f service.json\n" +"\n" +"\t\t# Shut down all resources in the path/to/resources directory\n" +"\t\tkubectl stop -f path/to/resources" + +#: pkg/kubectl/cmd/run.go:57 +msgid "" +"\n" +"\t\t# Start a single instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a single instance of hazelcast and let the container expose port " +"5701 .\n" +"\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +"\n" +"\t\t# Start a single instance of hazelcast and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +"\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +"env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a replicated instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx --replicas=5\n" +"\n" +"\t\t# Dry run. Print the corresponding API objects without creating them.\n" +"\t\tkubectl run nginx --image=nginx --dry-run\n" +"\n" +"\t\t# Start a single instance of nginx, but overload the spec of the " +"deployment with a partial set of values parsed from JSON.\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a pod of busybox and keep it in the foreground, don't restart it " +"if it exits.\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx container using the default command, but use custom " +"arguments (arg1 .. argN) for that command.\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx container using a different command and custom " +"arguments.\n" +"\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +"\n" +"\t\t# Start the perl container to compute π to 2000 places and print it " +"out.\n" +"\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +"wle 'print bpi(2000)'\n" +"\n" +"\t\t# Start the cron job to compute π to 2000 places and print it out every " +"5 minutes.\n" +"\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +"restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +msgstr "" +"\n" +"\t\t# Start a single instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a single instance of hazelcast and let the container expose port " +"5701 .\n" +"\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +"\n" +"\t\t# Start a single instance of hazelcast and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +"\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +"env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a replicated instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx --replicas=5\n" +"\n" +"\t\t# Dry run. Print the corresponding API objects without creating them.\n" +"\t\tkubectl run nginx --image=nginx --dry-run\n" +"\n" +"\t\t# Start a single instance of nginx, but overload the spec of the " +"deployment with a partial set of values parsed from JSON.\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a pod of busybox and keep it in the foreground, don't restart it " +"if it exits.\n" +"\t\tkubectl run -i -t busybox —image=busybox —restart=Never\n" +"\n" +"\t\t# Start the nginx container using the default command, but use custom " +"arguments (arg1 .. argN) for that command.\n" +"\t\tkubectl run nginx —image=nginx — \n" +"\n" +"\t\t# Start the nginx container using a different command and custom " +"arguments.\n" +"\t\tkubectl run nginx —image=nginx —command — \n" +"\n" +"\t\t# Start the perl container to compute π to 2000 places and print it " +"out.\n" +"\t\tkubectl run pi —image=perl —restart=OnFailure — perl -Mbignum=bpi -wle " +"‘print bpi(2000)’\n" +"\n" +"\t\t# Start the cron job to compute π to 2000 places and print it out every " +"5 minutes.\n" +"\t\tkubectl run pi —schedule=“0/5 * * * ?” —image=perl —restart=OnFailure — " +"perl -Mbignum=bpi -wle ‘print bpi(2000)’" + +#: pkg/kubectl/cmd/taint.go:67 +msgid "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'.\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified.\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists.\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-" +msgstr "" +"\n" +"\t\t# Update node ‘foo’ with a taint with key ‘dedicated’ and value ‘special-" +"user’ and effect ‘NoSchedule’.\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified.\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node ‘foo’ the taint with key ‘dedicated’ and effect " +"‘NoSchedule’ if one exists.\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node ‘foo’ all the taints with key ‘dedicated’\n" 
+"\t\tkubectl taint nodes foo dedicated-" + +#: pkg/kubectl/cmd/label.go:77 +msgid "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value.\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +"\t\t# Does not require the --overwrite flag.\n" +"\t\tkubectl label pods foo bar-" +msgstr "" +"\n" +"\t\t# Update pod ‘foo’ with the label ‘unhealthy’ and the value ‘true’.\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod ‘foo’ with the label ‘status’ and the value ‘unhealthy’, " +"overwriting any existing value.\n" +"\t\tkubectl label —overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods —all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in “pod.json”\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod ‘foo’ only if the resource is unchanged from version 1.\n" +"\t\tkubectl label pods foo status=unhealthy —resource-version=1\n" +"\n" +"\t\t# Update pod ‘foo’ by removing a label named ‘bar’ if it exists.\n" +"\t\t# Does not require the —overwrite flag.\n" +"\t\tkubectl label pods foo bar-" + +#: pkg/kubectl/cmd/rollingupdate.go:54 +msgid "" +"\n" +"\t\t# Update pods of frontend-v1 using new replication controller data in " +"frontend-v2.json.\n" +"\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +"\n" +"\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +"\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +"\n" +"\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +"image, and switching the\n" +"\t\t# name of the replication controller.\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +"\n" +"\t\t# Update the pods of frontend by just changing the image, and keeping " +"the old name.\n" +"\t\tkubectl rolling-update frontend --image=image:v2\n" +"\n" +"\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 to " +"frontend-v2).\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +msgstr "" +"\n" +"\t\t# Update pods of frontend-v1 using new replication controller data in " +"frontend-v2.json.\n" +"\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +"\n" +"\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +"\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +"\n" +"\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +"image, and switching the\n" +"\t\t# name of the replication controller.\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 —image=image:v2\n" +"\n" +"\t\t# Update the pods of frontend by just changing the image, and keeping " +"the old name.\n" +"\t\tkubectl rolling-update frontend —image=image:v2\n" +"\n" +"\t\t# Abort and reverse an existing rollout in progress (from 
frontend-v1 to " +"frontend-v2).\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 —rollback" + +#: pkg/kubectl/cmd/apply_view_last_applied.go:52 +msgid "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML.\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +msgstr "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML.\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#: pkg/kubectl/cmd/apply.go:75 +msgid "" +"\n" +"\t\tApply a configuration to a resource by filename or stdin.\n" +"\t\tThis resource will be created if it doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." +msgstr "" +"\n" +"\t\tApply a configuration to a resource by filename or stdin.\n" +"\t\tThis resource will be created if it doesn’t exist yet.\n" +"\t\tTo use ‘apply’, always create the resource initially with either ‘apply’ " +"or ‘create —save-config’.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the —prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." + +#: pkg/kubectl/cmd/convert.go:38 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by —output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." + +#: pkg/kubectl/cmd/create_clusterrole.go:31 +msgid "" +"\n" +"\t\tCreate a ClusterRole." +msgstr "" +"\n" +"\t\tCreate a ClusterRole." + +#: pkg/kubectl/cmd/create_clusterrolebinding.go:32 +msgid "" +"\n" +"\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +msgstr "" +"\n" +"\t\tCreate a ClusterRoleBinding for a particular ClusterRole." + +#: pkg/kubectl/cmd/create_rolebinding.go:32 +msgid "" +"\n" +"\t\tCreate a RoleBinding for a particular Role or ClusterRole." +msgstr "" +"\n" +"\t\tCreate a RoleBinding for a particular Role or ClusterRole." 
+ +#: pkg/kubectl/cmd/create_secret.go:200 +msgid "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist before hand. The public key " +"certificate must be .PEM encoded and match the given private key." +msgstr "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist before hand. The public key " +"certificate must be .PEM encoded and match the given private key." + +#: pkg/kubectl/cmd/create_configmap.go:32 +msgid "" +"\n" +"\t\tCreate a configmap based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single configmap may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a configmap based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a configmap based on a directory, each file whose basename " +"is a valid key in the directory will be\n" +"\t\tpackaged into the configmap. Any directory entries except regular files " +"are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a configmap based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single configmap may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a configmap based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a configmap based on a directory, each file whose basename " +"is a valid key in the directory will be\n" +"\t\tpackaged into the configmap. Any directory entries except regular files " +"are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +#: pkg/kubectl/cmd/create_namespace.go:32 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCreate a namespace with the specified name." + +#: pkg/kubectl/cmd/create_secret.go:119 +msgid "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running\n" +"\n" +"\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +" That produces a ~/.dockercfg file that is used by subsequent 'docker " +"push' and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they have to have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." 
+msgstr "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running\n" +"\n" +"\t\t $ docker login DOCKER_REGISTRY_SERVER —username=DOCKER_USER —" +"password=DOCKER_PASSWORD —email=DOCKER_EMAIL’.\n" +"\n" +" That produces a ~/.dockercfg file that is used by subsequent ‘docker " +"push’ and ‘docker pull’ commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they have to have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." + +#: pkg/kubectl/cmd/create_pdb.go:32 +msgid "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods" +msgstr "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods" + +#: pkg/kubectl/cmd/create.go:42 +msgid "" +"\n" +"\t\tCreate a resource by filename or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" +"\n" +"\t\tCreate a resource by filename or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." + +#: pkg/kubectl/cmd/create_quota.go:32 +msgid "" +"\n" +"\t\tCreate a resourcequota with the specified name, hard limits and optional " +"scopes" +msgstr "" +"\n" +"\t\tCreate a resourcequota with the specified name, hard limits and optional " +"scopes" + +#: pkg/kubectl/cmd/create_role.go:38 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCreate a role with single rule." + +#: pkg/kubectl/cmd/create_secret.go:47 +msgid "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files " +"are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files " +"are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +#: pkg/kubectl/cmd/create_serviceaccount.go:32 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCreate a service account with the specified name." 
+ +#: pkg/kubectl/cmd/run.go:52 +msgid "" +"\n" +"\t\tCreate and run a particular image, possibly replicated.\n" +"\n" +"\t\tCreates a deployment or job to manage the created container(s)." +msgstr "" +"\n" +"\t\tCreate and run a particular image, possibly replicated.\n" +"\n" +"\t\tCreates a deployment or job to manage the created container(s)." + +#: pkg/kubectl/cmd/autoscale.go:34 +msgid "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a kubernetes cluster.\n" +"\n" +"\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name and " +"creates an autoscaler that uses the given resource as a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." +msgstr "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a kubernetes cluster.\n" +"\n" +"\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name and " +"creates an autoscaler that uses the given resource as a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." + +#: pkg/kubectl/cmd/delete.go:40 +msgid "" +"\n" +"\t\tDelete resources by filenames, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of the arguments may " +"be specified: filenames,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource,\tyou must pass a grace" +"\tperiod of 0 and specify\n" +"\t\tthe --force flag.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods the scheduler may place new pods on those " +"nodes before the node\n" +"\t\thas released those resources and causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone\n" +"\t\tsubmits an update to a resource right when you submit a delete, their " +"update\n" +"\t\twill be lost along with the rest of the resource." 
+msgstr "" +"\n" +"\t\tDelete resources by filenames, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of the arguments may " +"be specified: filenames,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource,\tyou must pass a grace" +"\tperiod of 0 and specify\n" +"\t\tthe --force flag.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod’s processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods the scheduler may place new pods on those " +"nodes before the node\n" +"\t\thas released those resources and causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone\n" +"\t\tsubmits an update to a resource right when you submit a delete, their " +"update\n" +"\t\twill be lost along with the rest of the resource." + +#: pkg/kubectl/cmd/stop.go:31 +msgid "" +"\n" +"\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +"\n" +"\t\tThe stop command is deprecated, all its functionalities are covered by " +"delete command.\n" +"\t\tSee 'kubectl delete --help' for more details.\n" +"\n" +"\t\tAttempts to shut down and delete a resource that supports graceful " +"termination.\n" +"\t\tIf the resource is scalable it will be scaled to 0 before deletion." +msgstr "" +"\n" +"\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +"\n" +"\t\tThe stop command is deprecated, all its functionalities are covered by " +"delete command.\n" +"\t\tSee ‘kubectl delete —help’ for more details.\n" +"\n" +"\t\tAttempts to shut down and delete a resource that supports graceful " +"termination.\n" +"\t\tIf the resource is scalable it will be scaled to 0 before deletion." + +#: pkg/kubectl/cmd/top_node.go:60 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." 
+ +#: pkg/kubectl/cmd/top_pod.go:53 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." + +#: pkg/kubectl/cmd/top.go:33 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Heapster to be correctly configured and working on " +"the server. " +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Heapster to be correctly configured and working on " +"the server. " + +#: pkg/kubectl/cmd/drain.go:140 +msgid "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the APIServer supports eviction\n" +"\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use " +"normal DELETE\n" +"\t\tto delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tDaemonSet-managed pods, because those pods would be immediately replaced " +"by the\n" +"\t\tDaemonSet controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by ReplicationController,\n" +"\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +"any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +msgstr "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the APIServer supports eviction\n" +"\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use " +"normal DELETE\n" +"\t\tto delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tDaemonSet-managed pods, because those pods would be immediately replaced " +"by the\n" +"\t\tDaemonSet controller, which ignores unschedulable markings.
If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by ReplicationController,\n" +"\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +"any pods unless you\n" +"\t\tuse —force. —force will also allow deletion to proceed if the managing " +"resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t’drain’ waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#: pkg/kubectl/cmd/edit.go:56 +msgid "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts filenames as well as command line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts filenames as well as command line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify “-o json”.\n" +"\n" +"\t\tThe flag —windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. 
When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: pkg/kubectl/cmd/drain.go:115 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMark node as schedulable." + +#: pkg/kubectl/cmd/drain.go:90 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMark node as unschedulable." + +#: pkg/kubectl/cmd/completion.go:47 +msgid "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tNote: this requires the bash-completion framework, which is not " +"installed\n" +"\t\tby default on Mac. This can be installed by using homebrew:\n" +"\n" +"\t\t $ brew install bash-completion\n" +"\n" +"\t\tOnce installed, bash_completion must be evaluated. This can be done by " +"adding the\n" +"\t\tfollowing line to the .bash_profile\n" +"\n" +"\t\t $ source $(brew --prefix)/etc/bash_completion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2" +msgstr "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tNote: this requires the bash-completion framework, which is not " +"installed\n" +"\t\tby default on Mac. This can be installed by using homebrew:\n" +"\n" +"\t\t $ brew install bash-completion\n" +"\n" +"\t\tOnce installed, bash_completion must be evaluated. This can be done by " +"adding the\n" +"\t\tfollowing line to the .bash_profile\n" +"\n" +"\t\t $ source $(brew —prefix)/etc/bash_completion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2" + +#: pkg/kubectl/cmd/rollingupdate.go:45 +msgid "" +"\n" +"\t\tPerform a rolling update of the given ReplicationController.\n" +"\n" +"\t\tReplaces the specified replication controller with a new replication " +"controller by updating one pod at a time to use the\n" +"\t\tnew PodTemplate. The new-controller.json must specify the same namespace " +"as the\n" +"\t\texisting replication controller and overwrite at least one (common) " +"label in its replicaSelector.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" +msgstr "" +"\n" +"\t\tPerform a rolling update of the given ReplicationController.\n" +"\n" +"\t\tReplaces the specified replication controller with a new replication " +"controller by updating one pod at a time to use the\n" +"\t\tnew PodTemplate. The new-controller.json must specify the same namespace " +"as the\n" +"\t\texisting replication controller and overwrite at least one (common) " +"label in its replicaSelector.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" + +#: pkg/kubectl/cmd/replace.go:40 +msgid "" +"\n" +"\t\tReplace a resource by filename or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +"the\n" +"\t\tcomplete resource spec must be provided. 
This can be obtained by\n" +"\n" +"\t\t $ kubectl get TYPE NAME -o yaml\n" +"\n" +"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +"html to find if a field is mutable." +msgstr "" +"\n" +"\t\tReplace a resource by filename or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +"the\n" +"\t\tcomplete resource spec must be provided. This can be obtained by\n" +"\n" +"\t\t $ kubectl get TYPE NAME -o yaml\n" +"\n" +"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +"html to find if a field is mutable." + +#: pkg/kubectl/cmd/scale.go:34 +msgid "" +"\n" +"\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, or " +"Job.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is " +"validated before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." +msgstr "" +"\n" +"\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, or " +"Job.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is validated " +"before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." + +#: pkg/kubectl/cmd/apply_set_last_applied.go:62 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f <file>' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f <file>' was run,\n" +"\t\twithout updating any other parts of the object."
+ +#: pkg/kubectl/cmd/proxy.go:36 +msgid "" +"\n" +"\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +"\n" +"\t\t $ kubectl proxy --api-prefix=/\n" +"\n" +"\t\tTo proxy only part of the kubernetes api and also some static files:\n" +"\n" +"\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/" +"api/\n" +"\n" +"\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +"\n" +"\t\tTo proxy the entire kubernetes api at a different root, use:\n" +"\n" +"\t\t $ kubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +msgstr "" +"\n" +"\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +"\n" +"\t\t $ kubectl proxy --api-prefix=/\n" +"\n" +"\t\tTo proxy only part of the kubernetes api and also some static files:\n" +"\n" +"\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/" +"api/\n" +"\n" +"\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +"\n" +"\t\tTo proxy the entire kubernetes api at a different root, use:\n" +"\n" +"\t\t $ kubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" + +#: pkg/kubectl/cmd/patch.go:59 +msgid "" +"\n" +"\t\tUpdate field(s) of a resource using strategic merge patch\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +"html to find if a field is mutable." +msgstr "" +"\n" +"\t\tUpdate field(s) of a resource using strategic merge patch\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +"html to find if a field is mutable." + +#: pkg/kubectl/cmd/label.go:70 +#, c-format +msgid "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." +msgstr "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used." + +#: pkg/kubectl/cmd/taint.go:58 +#, c-format +msgid "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect.
As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* The value must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." +msgstr "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* The value must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." + +#: pkg/kubectl/cmd/apply_view_last_applied.go:46 +msgid "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change output format." +msgstr "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change output format." + +#: pkg/kubectl/cmd/cp.go:37 +msgid "" +"\n" +"\t # !!!Important Note!!!\n" +"\t # Requires that the 'tar' binary is present in your container\n" +"\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +"\n" +"\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in " +"the default namespace\n" +"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +"\n" +" # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific " +"container\n" +"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"<some-namespace>\n" +"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" +msgstr "" +"\n" +"\t # !!!Important Note!!!\n" +"\t # Requires that the 'tar' binary is present in your container\n" +"\t # image.
If 'tar' is not present, 'kubectl cp' will fail.\n" +"\n" +"\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in " +"the default namespace\n" +"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +"\n" +" # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific " +"container\n" +"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"<some-namespace>\n" +"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" + +#: pkg/kubectl/cmd/create_secret.go:205 +msgid "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair:\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" +msgstr "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair:\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/" +"tls.key" + +#: pkg/kubectl/cmd/create_namespace.go:35 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: pkg/kubectl/cmd/create_secret.go:59 +msgid "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +"ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret" +msgstr "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/" +"id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret --" +"from-literal=key2=topsecret" + +#: pkg/kubectl/cmd/create_serviceaccount.go:35 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: pkg/kubectl/cmd/create_service.go:232 +msgid "" +"\n" +"\t# Create a new ExternalName service named my-ns \n" +"\tkubectl create service externalname my-ns --external-name bar.com" +msgstr "" +"\n" +"\t# Create a new ExternalName service named my-ns \n" +"\tkubectl create service externalname my-ns --external-name bar.com" + +#: pkg/kubectl/cmd/create_service.go:225 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally."
+msgstr "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." + +#: pkg/kubectl/cmd/help.go:30 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." + +#: pkg/kubectl/cmd/create_service.go:173 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs —tcp=5678:8080" + +#: pkg/kubectl/cmd/create_service.go:53 +msgid "" +"\n" +" # Create a new clusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new clusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" +"\n" +" # Create a new clusterIP service named my-cs\n" +" kubectl create service clusterip my-cs —tcp=5678:8080\n" +"\n" +" # Create a new clusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs —clusterip=“None”" + +#: pkg/kubectl/cmd/create_deployment.go:36 +msgid "" +"\n" +" # Create a new deployment named my-dep that runs the busybox image.\n" +" kubectl create deployment my-dep --image=busybox" +msgstr "" +"\n" +" # Create a new deployment named my-dep that runs the busybox image.\n" +" kubectl create deployment my-dep —image=busybox" + +#: pkg/kubectl/cmd/create_service.go:116 +msgid "" +"\n" +" # Create a new nodeport service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new nodeport service named my-ns\n" +" kubectl create service nodeport my-ns —tcp=5678:8080" + +#: pkg/kubectl/cmd/clusterinfo_dump.go:62 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump —output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump —all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump —namespaces default,kube-system —output-" +"directory=/path/to/cluster-state" + +#: pkg/kubectl/cmd/annotate.go:78 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'.\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" 
kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value.\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1.\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists.\n" +" # Does not require the --overwrite flag.\n" +" kubectl annotate pods foo description-" +msgstr "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'.\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value.\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1.\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists.\n" +" # Does not require the --overwrite flag.\n" +" kubectl annotate pods foo description-" + +#: pkg/kubectl/cmd/create_service.go:170 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Create a LoadBalancer service with the specified name." + +#: pkg/kubectl/cmd/create_service.go:50 +msgid "" +"\n" +" Create a clusterIP service with the specified name." +msgstr "" +"\n" +" Create a clusterIP service with the specified name." + +#: pkg/kubectl/cmd/create_deployment.go:33 +msgid "" +"\n" +" Create a deployment with the specified name." +msgstr "" +"\n" +" Create a deployment with the specified name." + +#: pkg/kubectl/cmd/create_service.go:113 +msgid "" +"\n" +" Create a nodeport service with the specified name." +msgstr "" +"\n" +" Create a nodeport service with the specified name." + +#: pkg/kubectl/cmd/clusterinfo_dump.go:53 +msgid "" +"\n" +" Dumps cluster info out suitable for debugging and diagnosing cluster " +"problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, kubernetes will\n" +" build a set of files in that directory. By default only dumps things in " +"the 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster, these " +"logs are dumped into different directories\n" +" based on namespace and pod name." +msgstr "" +"\n" +" Dumps cluster info out suitable for debugging and diagnosing cluster " +"problems. By default, dumps everything to\n" +" stdout.
You can optionally specify a directory with --output-directory. " +"If you specify a directory, kubernetes will\n" +" build a set of files in that directory. By default only dumps things in " +"the 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster, these " +"logs are dumped into different directories\n" +" based on namespace and pod name." + +#: pkg/kubectl/cmd/clusterinfo.go:37 +msgid "" +"\n" +" Display addresses of the master and services with label kubernetes.io/" +"cluster-service=true\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." +msgstr "" +"\n" +" Display addresses of the master and services with label kubernetes.io/" +"cluster-service=true\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." + +#: pkg/kubectl/cmd/create_quota.go:62 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." + +#: pkg/kubectl/cmd/create_quota.go:61 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." + +#: pkg/kubectl/cmd/create_pdb.go:64 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." + +#: pkg/kubectl/cmd/expose.go:104 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" + +#: pkg/kubectl/cmd/run.go:139 +msgid "A schedule in the Cron format the job should be run with." +msgstr "A schedule in the Cron format the job should be run with." + +#: pkg/kubectl/cmd/expose.go:109 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." + +#: pkg/kubectl/cmd/expose.go:110 pkg/kubectl/cmd/run.go:122 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." + +#: pkg/kubectl/cmd/run.go:137 +msgid "" +"An inline JSON override for the generated service object. If this is non-" +"empty, it is used to override the generated object. Requires that the object " +"supply a valid apiVersion field. Only used if --expose is true."
+msgstr "" +"An inline JSON override for the generated service object. If this is non-" +"empty, it is used to override the generated object. Requires that the object " +"supply a valid apiVersion field. Only used if —expose is true." # https://github.com/kubernetes/kubernetes/blob/masterpkg/kubectl/cmd/apply.go#L98 +#: pkg/kubectl/cmd/apply.go:104 msgid "Apply a configuration to a resource by filename or stdin" msgstr "ファイル名を指定または標準入力経由でリソースにコンフィグを適用する" +#: pkg/kubectl/cmd/certificates.go:72 +msgid "Approve a certificate signing request" +msgstr "Approve a certificate signing request" + +#: pkg/kubectl/cmd/create_service.go:82 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assign your own ClusterIP or set to ‘None’ for a ‘headless’ service (no " +"loadbalancing)." + +#: pkg/kubectl/cmd/attach.go:70 +msgid "Attach to a running container" +msgstr "Attach to a running container" + +#: pkg/kubectl/cmd/autoscale.go:56 +msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +msgstr "Auto-scale a Deployment, ReplicaSet, or ReplicationController" + +#: pkg/kubectl/cmd/expose.go:113 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to ‘None’ to create a headless service." + +#: pkg/kubectl/cmd/create_clusterrolebinding.go:56 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole this ClusterRoleBinding should reference" + +#: pkg/kubectl/cmd/create_rolebinding.go:56 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole this RoleBinding should reference" + +#: pkg/kubectl/cmd/rollingupdate.go:102 +msgid "" +"Container name which will have its image upgraded. Only relevant when --" +"image is specified, ignored otherwise. Required when using --image on a " +"multi-container pod" +msgstr "" +"Container name which will have its image upgraded. Only relevant when —image " +"is specified, ignored otherwise. Required when using —image on a multi-" +"container pod" + +#: pkg/kubectl/cmd/convert.go:68 +msgid "Convert config files between different API versions" +msgstr "Convert config files between different API versions" + +#: pkg/kubectl/cmd/cp.go:65 +msgid "Copy files and directories to and from containers." +msgstr "Copy files and directories to and from containers." + +#: pkg/kubectl/cmd/create_clusterrolebinding.go:44 +msgid "Create a ClusterRoleBinding for a particular ClusterRole" +msgstr "Create a ClusterRoleBinding for a particular ClusterRole" + +#: pkg/kubectl/cmd/create_service.go:182 +msgid "Create a LoadBalancer service." +msgstr "Create a LoadBalancer service." + +#: pkg/kubectl/cmd/create_service.go:125 +msgid "Create a NodePort service." +msgstr "Create a NodePort service." + +#: pkg/kubectl/cmd/create_rolebinding.go:44 +msgid "Create a RoleBinding for a particular Role or ClusterRole" +msgstr "Create a RoleBinding for a particular Role or ClusterRole" + +#: pkg/kubectl/cmd/create_secret.go:214 +msgid "Create a TLS secret" +msgstr "Create a TLS secret" + +#: pkg/kubectl/cmd/create_service.go:69 +msgid "Create a clusterIP service." +msgstr "Create a clusterIP service." 
+ +#: pkg/kubectl/cmd/create_configmap.go:60 +msgid "Create a configmap from a local file, directory or literal value" +msgstr "Create a configmap from a local file, directory or literal value" + +#: pkg/kubectl/cmd/create_deployment.go:46 +msgid "Create a deployment with the specified name." +msgstr "Create a deployment with the specified name." + +#: pkg/kubectl/cmd/create_namespace.go:45 +msgid "Create a namespace with the specified name" +msgstr "Create a namespace with the specified name" + +#: pkg/kubectl/cmd/create_pdb.go:50 +msgid "Create a pod disruption budget with the specified name." +msgstr "Create a pod disruption budget with the specified name." + +#: pkg/kubectl/cmd/create_quota.go:48 +msgid "Create a quota with the specified name." +msgstr "Create a quota with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/masterpkg/kubectl/cmd/apply.go#L98 +#: pkg/kubectl/cmd/create.go:63 +msgid "Create a resource by filename or stdin" +msgstr "ファイル名を指定または標準入力経由でリソースを作成する" + +#: pkg/kubectl/cmd/create_secret.go:144 +msgid "Create a secret for use with a Docker registry" +msgstr "Create a secret for use with a Docker registry" + +#: pkg/kubectl/cmd/create_secret.go:74 +msgid "Create a secret from a local file, directory or literal value" +msgstr "Create a secret from a local file, directory or literal value" + +#: pkg/kubectl/cmd/create_secret.go:35 +msgid "Create a secret using specified subcommand" +msgstr "Create a secret using specified subcommand" + +#: pkg/kubectl/cmd/create_serviceaccount.go:45 +msgid "Create a service account with the specified name" +msgstr "Create a service account with the specified name" + +#: pkg/kubectl/cmd/create_service.go:37 +msgid "Create a service using specified subcommand." +msgstr "Create a service using specified subcommand." + +#: pkg/kubectl/cmd/create_service.go:241 +msgid "Create an ExternalName service." +msgstr "Create an ExternalName service." 
+ +#: pkg/kubectl/cmd/delete.go:132 +msgid "" +"Delete resources by filenames, stdin, resources and names, or by resources " +"and label selector" +msgstr "" +"Delete resources by filenames, stdin, resources and names, or by resources " +"and label selector" + # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: pkg/kubectl/cmd/config/delete_cluster.go:39 msgid "Delete the specified cluster from the kubeconfig" msgstr "kubeconfigから指定したクラスターを削除する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: pkg/kubectl/cmd/config/delete_context.go:39 msgid "Delete the specified context from the kubeconfig" msgstr "kubeconfigから指定したコンテキストを削除する" +#: pkg/kubectl/cmd/certificates.go:122 +msgid "Deny a certificate signing request" +msgstr "Deny a certificate signing request" + +#: pkg/kubectl/cmd/stop.go:59 +msgid "Deprecated: Gracefully shut down a resource by name or filename" +msgstr "Deprecated: Gracefully shut down a resource by name or filename" + # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: pkg/kubectl/cmd/config/get_contexts.go:64 msgid "Describe one or many contexts" msgstr "1つまたは複数のコンテキストを記述する" +#: pkg/kubectl/cmd/top_node.go:78 +msgid "Display Resource (CPU/Memory) usage of nodes" +msgstr "Display Resource (CPU/Memory) usage of nodes" + +#: pkg/kubectl/cmd/top_pod.go:80 +msgid "Display Resource (CPU/Memory) usage of pods" +msgstr "Display Resource (CPU/Memory) usage of pods" + +#: pkg/kubectl/cmd/top.go:44 +msgid "Display Resource (CPU/Memory) usage." +msgstr "Display Resource (CPU/Memory) usage." + # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: pkg/kubectl/cmd/clusterinfo.go:51 +#| msgid "Display clusters defined in the kubeconfig" +msgid "Display cluster info" +msgstr "クラスターの情報を表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: pkg/kubectl/cmd/config/get_clusters.go:41 msgid "Display clusters defined in the kubeconfig" msgstr "kubeconfigで定義されたクラスターを表示する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: pkg/kubectl/cmd/config/view.go:67 msgid "Display merged kubeconfig settings or a specified kubeconfig file" -msgstr "マージされたkubeconfigの設定または指定されたkubeconfigファイルを表示する" +msgstr "" +"マージされたkubeconfigの設定または指定されたkubeconfigファイルを表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: pkg/kubectl/cmd/get.go:111 +msgid "Display one or many resources" +msgstr "1つまたは複数のリソースを表示する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#: pkg/kubectl/cmd/config/current_context.go:49 msgid "Displays the current-context" msgstr "カレントコンテキストを表示する" +#: pkg/kubectl/cmd/explain.go:51 +msgid "Documentation of resources" +msgstr "リソースの説明を表示する" + +#: pkg/kubectl/cmd/drain.go:178 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparation for maintenance" + +#: pkg/kubectl/cmd/clusterinfo_dump.go:39 +msgid "Dump lots of relevant info for debugging and diagnosis" +msgstr "Dump lots of relevant info for debugging and diagnosis" + +#: pkg/kubectl/cmd/edit.go:110 +msgid "Edit a resource on the server" +msgstr "Edit a resource on the server" + +#: pkg/kubectl/cmd/create_secret.go:160 +msgid "Email for Docker registry" +msgstr "Email for Docker registry" + +#: pkg/kubectl/cmd/exec.go:69 
+msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +#: pkg/kubectl/cmd/rollingupdate.go:103 +msgid "" +"Explicit policy for when to pull container images. Required when --image is " +"same as existing image, ignored otherwise." +msgstr "" +"Explicit policy for when to pull container images. Required when —image is " +"same as existing image, ignored otherwise." + +#: pkg/kubectl/cmd/portforward.go:76 +msgid "Forward one or more local ports to a pod" +msgstr "Forward one or more local ports to a pod" + +#: pkg/kubectl/cmd/help.go:37 +msgid "Help about any command" +msgstr "Help about any command" + +#: pkg/kubectl/cmd/expose.go:103 +msgid "" +"IP to assign to the Load Balancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." +msgstr "" +"IP to assign to the Load Balancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." + +#: pkg/kubectl/cmd/expose.go:112 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"If non-empty, set the session affinity for the service to this; legal " +"values: ‘None’, ‘ClientIP’" + +#: pkg/kubectl/cmd/annotate.go:136 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +#: pkg/kubectl/cmd/label.go:134 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +#: pkg/kubectl/cmd/rollingupdate.go:99 +msgid "" +"Image to use for upgrading the replication controller. Must be distinct from " +"the existing image (either new image or new image tag). Can not be used " +"with --filename/-f" +msgstr "" +"Image to use for upgrading the replication controller. Must be distinct from " +"the existing image (either new image or new image tag). Can not be used " +"with —filename/-f" + +#: pkg/kubectl/cmd/rollout/rollout.go:47 +msgid "Manage a deployment rollout" +msgstr "Manage a deployment rollout" + +#: pkg/kubectl/cmd/drain.go:128 +msgid "Mark node as schedulable" +msgstr "Mark node as schedulable" + +#: pkg/kubectl/cmd/drain.go:103 +msgid "Mark node as unschedulable" +msgstr "Mark node as unschedulable" + +#: pkg/kubectl/cmd/rollout/rollout_pause.go:74 +msgid "Mark the provided resource as paused" +msgstr "Mark the provided resource as paused" + +#: pkg/kubectl/cmd/certificates.go:36 +msgid "Modify certificate resources." +msgstr "Modify certificate resources." + # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: pkg/kubectl/cmd/config/config.go:40 msgid "Modify kubeconfig files" msgstr "kubeconfigファイルを変更する" +#: pkg/kubectl/cmd/expose.go:108 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." + +#: pkg/kubectl/cmd/logs.go:113 +msgid "" +"Only return logs after a specific date (RFC3339). 
Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." + +#: pkg/kubectl/cmd/completion.go:104 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Output shell completion code for the specified shell (bash or zsh)" + +#: pkg/kubectl/cmd/convert.go:85 +msgid "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1').)" +msgstr "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1').)" + +#: pkg/kubectl/cmd/create_secret.go:158 +msgid "Password for Docker registry authentication" +msgstr "Password for Docker registry authentication" + +#: pkg/kubectl/cmd/create_secret.go:226 +msgid "Path to PEM encoded public key certificate." +msgstr "Path to PEM encoded public key certificate." + +#: pkg/kubectl/cmd/create_secret.go:227 +msgid "Path to private key associated with given certificate." +msgstr "Path to private key associated with given certificate." + +#: pkg/kubectl/cmd/rollingupdate.go:85 +msgid "Perform a rolling update of the given ReplicationController" +msgstr "Perform a rolling update of the given ReplicationController" + +#: pkg/kubectl/cmd/scale.go:83 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." + +#: pkg/kubectl/cmd/version.go:40 +msgid "Print the client and server version information" +msgstr "Print the client and server version information" + +#: pkg/kubectl/cmd/options.go:38 +msgid "Print the list of flags inherited by all commands" +msgstr "Print the list of flags inherited by all commands" + +#: pkg/kubectl/cmd/logs.go:93 +msgid "Print the logs for a container in a pod" +msgstr "Print the logs for a container in a pod" + +# https://github.com/kubernetes/kubernetes/blob/masterpkg/kubectl/cmd/apply.go#L98 +#: pkg/kubectl/cmd/replace.go:71 +msgid "Replace a resource by filename or stdin" +msgstr "Replace a resource by filename or stdin" + +#: pkg/kubectl/cmd/rollout/rollout_resume.go:72 +msgid "Resume a paused resource" +msgstr "Resume a paused resource" + +#: pkg/kubectl/cmd/create_rolebinding.go:57 +msgid "Role this RoleBinding should reference" +msgstr "Role this RoleBinding should reference" + +#: pkg/kubectl/cmd/run.go:97 +msgid "Run a particular image on the cluster" +msgstr "Run a particular image on the cluster" + +#: pkg/kubectl/cmd/proxy.go:69 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Run a proxy to the Kubernetes API server" + +#: pkg/kubectl/cmd/create_secret.go:161 +msgid "Server location for Docker registry" +msgstr "Server location for Docker registry" + +#: pkg/kubectl/cmd/scale.go:71 +msgid "" +"Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job" +msgstr "" +"Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job" + +#: pkg/kubectl/cmd/set/set.go:38 +msgid "Set specific features on objects" +msgstr "Set specific features on objects" + +#: pkg/kubectl/cmd/apply_set_last_applied.go:83 +msgid "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file." +msgstr "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file."
+ +#: pkg/kubectl/cmd/set/set_selector.go:82 +msgid "Set the selector on a resource" +msgstr "リソースのセレクターを設定する" + # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67 +#: pkg/kubectl/cmd/config/create_cluster.go:68 msgid "Sets a cluster entry in kubeconfig" msgstr "kubeconfigにクラスターエントリを設定する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57 +#: pkg/kubectl/cmd/config/create_context.go:58 msgid "Sets a context entry in kubeconfig" msgstr "kubeconfigにコンテキストエントリを設定する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103 +#: pkg/kubectl/cmd/config/create_authinfo.go:104 msgid "Sets a user entry in kubeconfig" msgstr "kubeconfigにユーザーエントリを設定する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59 +#: pkg/kubectl/cmd/config/set.go:60 msgid "Sets an individual value in a kubeconfig file" msgstr "kubeconfigファイル内の変数を個別に設定する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48 +#: pkg/kubectl/cmd/config/use_context.go:49 msgid "Sets the current-context in a kubeconfig file" msgstr "kubeconfigにカレントコンテキストを設定する" +#: pkg/kubectl/cmd/describe.go:86 +msgid "Show details of a specific resource or group of resources" +msgstr "Show details of a specific resource or group of resources" + +#: pkg/kubectl/cmd/rollout/rollout_status.go:58 +msgid "Show the status of the rollout" +msgstr "Show the status of the rollout" + +#: pkg/kubectl/cmd/expose.go:106 +msgid "Synonym for --target-port" +msgstr "Synonym for --target-port" + +#: pkg/kubectl/cmd/expose.go:88 +msgid "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes Service" +msgstr "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes Service" + +#: pkg/kubectl/cmd/run.go:117 +msgid "The image for the container to run." +msgstr "The image for the container to run." + +#: pkg/kubectl/cmd/run.go:119 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" + +#: pkg/kubectl/cmd/rollingupdate.go:101 +msgid "" +"The key to use to differentiate between two different controllers, default " +"'deployment'. Only relevant when --image is specified, ignored otherwise" +msgstr "" +"The key to use to differentiate between two different controllers, default " +"'deployment'. Only relevant when --image is specified, ignored otherwise" + +#: pkg/kubectl/cmd/create_pdb.go:63 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"The minimum number or percentage of available pods this budget requires." + +#: pkg/kubectl/cmd/expose.go:111 +msgid "The name for the newly created object." +msgstr "The name for the newly created object." + +#: pkg/kubectl/cmd/autoscale.go:72 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." + +#: pkg/kubectl/cmd/run.go:116 +msgid "" +"The name of the API generator to use, see http://kubernetes.io/docs/user-" +"guide/kubectl-conventions/#generators for a list."
+msgstr "" +"The name of the API generator to use, see http://kubernetes.io/docs/user-" +"guide/kubectl-conventions/#generators for a list." + +#: pkg/kubectl/cmd/autoscale.go:67 +msgid "" +"The name of the API generator to use. Currently there is only 1 generator." +msgstr "" +"The name of the API generator to use. Currently there is only 1 generator." + +#: pkg/kubectl/cmd/expose.go:99 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"The name of the API generator to use. There are 2 generators: ‘service/v1’ " +"and ‘service/v2’. The only difference between them is that service port in " +"v1 is named ‘default’, while it is left unnamed in v2. Default is ‘service/" +"v2’." + +#: pkg/kubectl/cmd/run.go:136 +msgid "" +"The name of the generator to use for creating a service. Only used if --" +"expose is true" +msgstr "" +"The name of the generator to use for creating a service. Only used if —" +"expose is true" + +#: pkg/kubectl/cmd/expose.go:100 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "The network protocol for the service to be created. Default is ‘TCP’." + +#: pkg/kubectl/cmd/expose.go:101 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" + +#: pkg/kubectl/cmd/run.go:124 +msgid "" +"The port that this container exposes. If --expose is true, this is also the " +"port used by the service that is created." +msgstr "" +"The port that this container exposes. If —expose is true, this is also the " +"port used by the service that is created." + +#: pkg/kubectl/cmd/run.go:134 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement limits for this container. For example, ‘cpu=200m," +"memory=512Mi’. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." + +#: pkg/kubectl/cmd/run.go:133 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement requests for this container. For example, " +"‘cpu=100m,memory=256Mi’. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." + +#: pkg/kubectl/cmd/run.go:131 +msgid "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]. " +"If set to 'Always' a deployment is created, if set to 'OnFailure' a job is " +"created, if set to 'Never', a regular pod is created. For the latter two --" +"replicas must be 1. Default 'Always', for CronJobs ` + "`" + `Never` + "`" + `." +msgstr "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]. " +"If set to ‘Always’ a deployment is created, if set to ‘OnFailure’ a job is " +"created, if set to ‘Never’, a regular pod is created. For the latter two —" +"replicas must be 1. 
Default 'Always', for CronJobs ` + "`" + `Never` + "`" + `." + +#: pkg/kubectl/cmd/create_secret.go:88 +msgid "The type of secret to create" +msgstr "The type of secret to create" + +#: pkg/kubectl/cmd/expose.go:102 +msgid "" +"Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +"'ClusterIP'." +msgstr "" +"Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +"'ClusterIP'." + +#: pkg/kubectl/cmd/rollout/rollout_undo.go:72 +msgid "Undo a previous rollout" +msgstr "現在のロールアウトを取り消す" + # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47 +#: pkg/kubectl/cmd/config/unset.go:48 msgid "Unsets an individual value in a kubeconfig file" msgstr "kubeconfigファイルから変数を個別に削除する" +#: pkg/kubectl/cmd/patch.go:96 +msgid "Update field(s) of a resource using strategic merge patch" +msgstr "Update field(s) of a resource using strategic merge patch" + +#: pkg/kubectl/cmd/set/set_image.go:95 +msgid "Update image of a pod template" +msgstr "Update image of a pod template" + +#: pkg/kubectl/cmd/set/set_resources.go:102 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Update resource requests/limits on objects with pod templates" + +#: pkg/kubectl/cmd/annotate.go:116 msgid "Update the annotations on a resource" msgstr "リソースのアノテーションを更新する" +#: pkg/kubectl/cmd/label.go:114 +msgid "Update the labels on a resource" +msgstr "リソースのラベルを更新する" + +#: pkg/kubectl/cmd/taint.go:87 +msgid "Update the taints on one or more nodes" +msgstr "Update the taints on one or more nodes" + +#: pkg/kubectl/cmd/create_secret.go:156 +msgid "Username for Docker registry authentication" +msgstr "Username for Docker registry authentication" + +#: pkg/kubectl/cmd/apply_view_last_applied.go:64 +msgid "View latest last-applied-configuration annotations of a resource/object" +msgstr "" +"View latest last-applied-configuration annotations of a resource/object" + +#: pkg/kubectl/cmd/rollout/rollout_history.go:52 +msgid "View rollout history" +msgstr "ロールアウトの履歴を表示する" + +#: pkg/kubectl/cmd/clusterinfo_dump.go:46 msgid "" -"watch is only supported on individual resources and resource collections - " -"%d resources were found" -msgid_plural "" -"watch is only supported on individual resources and resource collections - " -"%d resources were found" -msgstr[0] "" -"watchは単一リソース及びリソースコレクションのみサポートしています - " -"%d個のリソースが見つかりました" -msgstr[1] "" -"watchは単一リソース及びリソースコレクションのみサポートしています - " -"%d個のリソースが見つかりました" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Where to output the files.
If empty or ‘-‘ uses stdout, otherwise creates a " +"directory hierarchy in that directory" + +#: pkg/kubectl/cmd/run_test.go:85 +msgid "dummy restart flag)" +msgstr "dummy restart flag)" + +#: pkg/kubectl/cmd/create_service.go:254 +msgid "external name of service" +msgstr "external name of service" + +#: pkg/kubectl/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl controls the Kubernetes cluster manager" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgstr[0] "" +#~ "watchは単一リソース及びリソースコレクションのみサポートしています - %d個の" +#~ "リソースが見つかりました" +#~ msgstr[1] "" +#~ "watchは単一リソース及びリソースコレクションのみサポートしています - %d個の" +#~ "リソースが見つかりました" `) func translationsKubectlJa_jpLc_messagesK8sPoBytes() ([]byte, error) { diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index 235416ff41..cbf43e685c 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "attachablepodforobject.go", - "canbeautoscaled.go", "canbeexposed.go", "helpers.go", "historyviewer.go", @@ -51,7 +50,6 @@ go_library( go_test( name = "go_default_test", srcs = [ - "canbeautoscaled_test.go", "canbeexposed_test.go", "helpers_test.go", "logsforobject_test.go", diff --git a/pkg/kubectl/polymorphichelpers/canbeautoscaled.go b/pkg/kubectl/polymorphichelpers/canbeautoscaled.go deleted file mode 100644 index c91e816f7d..0000000000 --- a/pkg/kubectl/polymorphichelpers/canbeautoscaled.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package polymorphichelpers - -import ( - "fmt" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func canBeAutoscaled(kind schema.GroupKind) error { - switch kind { - case - corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), - appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), - appsv1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), - appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(), - extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), - extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(): - // nothing to do here - default: - return fmt.Errorf("cannot autoscale a %v", kind) - } - return nil -} diff --git a/pkg/kubectl/polymorphichelpers/canbeautoscaled_test.go b/pkg/kubectl/polymorphichelpers/canbeautoscaled_test.go deleted file mode 100644 index b1e1fc92f0..0000000000 --- a/pkg/kubectl/polymorphichelpers/canbeautoscaled_test.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package polymorphichelpers - -import ( - "testing" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func TestCanBeAutoscaled(t *testing.T) { - tests := []struct { - kind schema.GroupKind - expectErr bool - }{ - { - kind: corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), - expectErr: false, - }, - { - kind: appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), - expectErr: false, - }, - { - kind: appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(), - expectErr: false, - }, - { - kind: extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), - expectErr: false, - }, - { - kind: corev1.SchemeGroupVersion.WithKind("Node").GroupKind(), - expectErr: true, - }, - { - kind: corev1.SchemeGroupVersion.WithKind("Service").GroupKind(), - expectErr: true, - }, - { - kind: corev1.SchemeGroupVersion.WithKind("Pod").GroupKind(), - expectErr: true, - }, - } - - for _, test := range tests { - err := canBeAutoscaled(test.kind) - if test.expectErr && err == nil { - t.Error("unexpected non-error") - } - if !test.expectErr && err != nil { - t.Errorf("unexpected error: %v", err) - } - } -} diff --git a/pkg/kubectl/polymorphichelpers/helpers_test.go b/pkg/kubectl/polymorphichelpers/helpers_test.go index 2aba02e6db..7afcd90c0f 100644 --- a/pkg/kubectl/polymorphichelpers/helpers_test.go +++ b/pkg/kubectl/polymorphichelpers/helpers_test.go @@ -174,7 +174,7 @@ func TestGetFirstPod(t *testing.T) { } selector := labels.Set(labelSet).AsSelector() - pod, numPods, err := GetFirstPod(fake.Core(), metav1.NamespaceDefault, selector.String(), 1*time.Minute, test.sortBy) + pod, numPods, err := GetFirstPod(fake.CoreV1(), metav1.NamespaceDefault, selector.String(), 1*time.Minute, test.sortBy) pod.Spec.SecurityContext = nil if !test.expectedErr && err != nil { t.Errorf("%s: unexpected error: %v", test.name, err) diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index 15cb482c7b..46f3b5a15a 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -79,12 +79,6 @@ type PortsForObjectFunc func(object runtime.Object) ([]string, error) // PortsForObjectFn gives a way to easily override the function for unit testing if needed var PortsForObjectFn PortsForObjectFunc = portsForObject -// CanBeAutoscaledFunc checks whether the kind of resources could be autoscaled -type CanBeAutoscaledFunc func(kind schema.GroupKind) error - -// CanBeAutoscaledFn gives a way to easily override the function for unit testing if needed -var CanBeAutoscaledFn CanBeAutoscaledFunc = canBeAutoscaled - // CanBeExposedFunc is a function type that can tell you whether a given GroupKind is capable of being exposed type CanBeExposedFunc func(kind schema.GroupKind) error diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 
824f2ac82c..ac37bef85c 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -903,8 +903,8 @@ func TestUpdate_assignOriginalAnnotation(t *testing.T) { newRc := newRc(1, 1) fake := fake.NewSimpleClientset(oldRc) updater := &RollingUpdater{ - rcClient: fake.Core(), - podClient: fake.Core(), + rcClient: fake.CoreV1(), + podClient: fake.CoreV1(), ns: "default", scaleAndWait: func(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) { return rc, nil @@ -1101,7 +1101,7 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) { Container: tt.container, DeploymentKey: tt.deploymentKey, } - updatedRc, err := CreateNewControllerFromCurrentController(fake.Core(), codec, config) + updatedRc, err := CreateNewControllerFromCurrentController(fake.CoreV1(), codec, config) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1177,8 +1177,8 @@ func TestRollingUpdater_cleanupWithClients(t *testing.T) { fake := fake.NewSimpleClientset(objs...) updater := &RollingUpdater{ ns: "default", - rcClient: fake.Core(), - podClient: fake.Core(), + rcClient: fake.CoreV1(), + podClient: fake.CoreV1(), } config := &RollingUpdaterConfig{ Out: ioutil.Discard, @@ -1227,7 +1227,7 @@ func TestRollingUpdater_cleanupWithClients_Rename(t *testing.T) { return false, nil, nil }) - err := Rename(fake.Core(), rcExisting, rc.Name) + err := Rename(fake.CoreV1(), rcExisting, rc.Name) if err != nil { t.Fatal(err) } @@ -1315,7 +1315,7 @@ func TestFindSourceController(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { fakeClient := fake.NewSimpleClientset(tt.list) - ctrl, err := FindSourceController(fakeClient.Core(), "default", tt.name) + ctrl, err := FindSourceController(fakeClient.CoreV1(), "default", tt.name) if tt.expectError && err == nil { t.Errorf("unexpected non-error") } @@ -1425,7 +1425,7 @@ func TestUpdateExistingReplicationController(t *testing.T) { t.Run(tt.name, func(t *testing.T) { buffer := &bytes.Buffer{} fakeClient := fake.NewSimpleClientset(tt.expectedRc) - rc, err := UpdateExistingReplicationController(fakeClient.Core(), fakeClient.Core(), tt.rc, "default", tt.name, tt.deploymentKey, tt.deploymentValue, buffer) + rc, err := UpdateExistingReplicationController(fakeClient.CoreV1(), fakeClient.CoreV1(), tt.rc, "default", tt.name, tt.deploymentKey, tt.deploymentValue, buffer) if !reflect.DeepEqual(rc, tt.expectedRc) { t.Errorf("expected:\n%#v\ngot:\n%#v\n", tt.expectedRc, rc) } @@ -1832,8 +1832,8 @@ func TestRollingUpdater_readyPods(t *testing.T) { updater := &RollingUpdater{ ns: "default", - rcClient: client.Core(), - podClient: client.Core(), + rcClient: client.CoreV1(), + podClient: client.CoreV1(), nowFn: tt.nowFn, } oldReady, newReady, err := updater.readyPods(tt.oldRc, tt.newRc, tt.minReadySeconds) diff --git a/pkg/kubelet/dockershim/docker_image.go b/pkg/kubelet/dockershim/docker_image.go index c1089a037a..fb7cdd70b7 100644 --- a/pkg/kubelet/dockershim/docker_image.go +++ b/pkg/kubelet/dockershim/docker_image.go @@ -120,24 +120,27 @@ func (ds *dockerService) RemoveImage(_ context.Context, r *runtimeapi.RemoveImag // TODO: We assume image.Image is image ID here, which is true in the current implementation // of kubelet, but we should still clarify this in CRI. 
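As context for the RemoveImage rewrite that follows: the new body collects every reference to an image (RepoTags, RepoDigests, and finally the image ID) and removes each one in turn. A minimal, self-contained Go sketch of that iteration pattern; removeRef is a hypothetical stand-in for ds.client.RemoveImage, not the real libdocker call.

package main

import "fmt"

// removeAllReferences removes an image by every name it is known under.
// The image is only truly gone once each tag, each digest, and the ID
// itself have been removed.
func removeAllReferences(repoTags, repoDigests []string, id string, removeRef func(ref string) error) error {
	refs := append([]string{}, repoTags...)
	refs = append(refs, repoDigests...)
	refs = append(refs, id)
	for _, ref := range refs {
		if err := removeRef(ref); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = removeAllReferences(
		[]string{"foo:latest"},
		[]string{"foo@sha256:abcd"},
		"1111",
		func(ref string) error { fmt.Println("removing", ref); return nil },
	)
}
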
	imageInspect, err := ds.client.InspectImageByID(image.Image)
-	if err == nil && imageInspect != nil && len(imageInspect.RepoTags) > 1 {
-		for _, tag := range imageInspect.RepoTags {
-			if _, err := ds.client.RemoveImage(tag, dockertypes.ImageRemoveOptions{PruneChildren: true}); err != nil && !libdocker.IsImageNotFoundError(err) {
-				return nil, err
-			}
-		}
-		return &runtimeapi.RemoveImageResponse{}, nil
-	}
+
	// dockerclient.InspectImageByID doesn't work with digest and repoTags,
	// it is safe to continue removing it since there is another check below.
	if err != nil && !libdocker.IsImageNotFoundError(err) {
		return nil, err
	}

-	_, err = ds.client.RemoveImage(image.Image, dockertypes.ImageRemoveOptions{PruneChildren: true})
-	if err != nil && !libdocker.IsImageNotFoundError(err) {
-		return nil, err
+	// An image can have different numbers of RepoTags and RepoDigests.
+	// Iterating over both of them plus the image ID ensures the image really got removed.
+	// It also avoids failing to delete images that are in fact deletable through one of these references.
+	var images []string
+	images = append(images, imageInspect.RepoTags...)
+	images = append(images, imageInspect.RepoDigests...)
+	images = append(images, image.Image)
+
+	for _, image := range images {
+		if _, err := ds.client.RemoveImage(image, dockertypes.ImageRemoveOptions{PruneChildren: true}); err != nil && !libdocker.IsImageNotFoundError(err) {
+			return nil, err
+		}
	}
+
	return &runtimeapi.RemoveImageResponse{}, nil
}
diff --git a/pkg/kubelet/dockershim/docker_image_test.go b/pkg/kubelet/dockershim/docker_image_test.go
index 51363cab4b..3dbd5ef2b3 100644
--- a/pkg/kubelet/dockershim/docker_image_test.go
+++ b/pkg/kubelet/dockershim/docker_image_test.go
@@ -30,22 +30,57 @@ import (
 )

 func TestRemoveImage(t *testing.T) {
-	ds, fakeDocker, _ := newTestDockerService()
-	id := "1111"
-	fakeDocker.InjectImageInspects([]dockertypes.ImageInspect{{ID: id, RepoTags: []string{"foo"}}})
-	ds.RemoveImage(getTestCTX(), &runtimeapi.RemoveImageRequest{Image: &runtimeapi.ImageSpec{Image: id}})
-	fakeDocker.AssertCallDetails(libdocker.NewCalledDetail("inspect_image", nil),
-		libdocker.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}}))
-}
+	tests := map[string]struct {
+		image         dockertypes.ImageInspect
+		calledDetails []libdocker.CalledDetail
+	}{
+		"single tag": {
+			dockertypes.ImageInspect{ID: "1111", RepoTags: []string{"foo"}},
+			[]libdocker.CalledDetail{
+				libdocker.NewCalledDetail("inspect_image", nil),
+				libdocker.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
+				libdocker.NewCalledDetail("remove_image", []interface{}{"1111", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
+			},
+		},
+		"multiple tags": {
+			dockertypes.ImageInspect{ID: "2222", RepoTags: []string{"foo", "bar"}},
+			[]libdocker.CalledDetail{
+				libdocker.NewCalledDetail("inspect_image", nil),
+				libdocker.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
+				libdocker.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
+				libdocker.NewCalledDetail("remove_image", []interface{}{"2222", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
+			},
+		},
+		"single tag multiple repo digests": {
+			dockertypes.ImageInspect{ID: "3333", RepoTags: []string{"foo"}, RepoDigests: []string{"foo@3333", "example.com/foo@3333"}},
+			[]libdocker.CalledDetail{
+				
libdocker.NewCalledDetail("inspect_image", nil), + libdocker.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}), + libdocker.NewCalledDetail("remove_image", []interface{}{"foo@3333", dockertypes.ImageRemoveOptions{PruneChildren: true}}), + libdocker.NewCalledDetail("remove_image", []interface{}{"example.com/foo@3333", dockertypes.ImageRemoveOptions{PruneChildren: true}}), + libdocker.NewCalledDetail("remove_image", []interface{}{"3333", dockertypes.ImageRemoveOptions{PruneChildren: true}}), + }, + }, + "no tags multiple repo digests": { + dockertypes.ImageInspect{ID: "4444", RepoTags: []string{}, RepoDigests: []string{"foo@4444", "example.com/foo@4444"}}, + []libdocker.CalledDetail{ + libdocker.NewCalledDetail("inspect_image", nil), + libdocker.NewCalledDetail("remove_image", []interface{}{"foo@4444", dockertypes.ImageRemoveOptions{PruneChildren: true}}), + libdocker.NewCalledDetail("remove_image", []interface{}{"example.com/foo@4444", dockertypes.ImageRemoveOptions{PruneChildren: true}}), + libdocker.NewCalledDetail("remove_image", []interface{}{"4444", dockertypes.ImageRemoveOptions{PruneChildren: true}}), + }, + }, + } -func TestRemoveImageWithMultipleTags(t *testing.T) { - ds, fakeDocker, _ := newTestDockerService() - id := "1111" - fakeDocker.InjectImageInspects([]dockertypes.ImageInspect{{ID: id, RepoTags: []string{"foo", "bar"}}}) - ds.RemoveImage(getTestCTX(), &runtimeapi.RemoveImageRequest{Image: &runtimeapi.ImageSpec{Image: id}}) - fakeDocker.AssertCallDetails(libdocker.NewCalledDetail("inspect_image", nil), - libdocker.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}), - libdocker.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}})) + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ds, fakeDocker, _ := newTestDockerService() + fakeDocker.InjectImageInspects([]dockertypes.ImageInspect{test.image}) + ds.RemoveImage(getTestCTX(), &runtimeapi.RemoveImageRequest{Image: &runtimeapi.ImageSpec{Image: test.image.ID}}) + err := fakeDocker.AssertCallDetails(test.calledDetails...) + assert.NoError(t, err) + }) + } } func TestPullWithJSONError(t *testing.T) { diff --git a/pkg/kubelet/dockershim/libdocker/fake_client.go b/pkg/kubelet/dockershim/libdocker/fake_client.go index c176bf7139..3f58211380 100644 --- a/pkg/kubelet/dockershim/libdocker/fake_client.go +++ b/pkg/kubelet/dockershim/libdocker/fake_client.go @@ -37,14 +37,14 @@ import ( "k8s.io/apimachinery/pkg/util/clock" ) -type calledDetail struct { +type CalledDetail struct { name string arguments []interface{} } // NewCalledDetail create a new call detail item. -func NewCalledDetail(name string, arguments []interface{}) calledDetail { - return calledDetail{name: name, arguments: arguments} +func NewCalledDetail(name string, arguments []interface{}) CalledDetail { + return CalledDetail{name: name, arguments: arguments} } // FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup. 
@@ -58,7 +58,7 @@ type FakeDockerClient struct { Images []dockertypes.ImageSummary ImageIDsNeedingAuth map[string]dockertypes.AuthConfig Errors map[string]error - called []calledDetail + called []CalledDetail pulled []string EnableTrace bool RandGenerator *rand.Rand @@ -132,7 +132,7 @@ func (f *FakeDockerClient) WithRandSource(source rand.Source) *FakeDockerClient return f } -func (f *FakeDockerClient) appendCalled(callDetail calledDetail) { +func (f *FakeDockerClient) appendCalled(callDetail CalledDetail) { if f.EnableTrace { f.called = append(f.called, callDetail) } @@ -183,7 +183,7 @@ func (f *FakeDockerClient) ClearErrors() { func (f *FakeDockerClient) ClearCalls() { f.Lock() defer f.Unlock() - f.called = []calledDetail{} + f.called = []CalledDetail{} f.pulled = []string{} f.Created = []string{} f.Started = []string{} @@ -286,7 +286,7 @@ func (f *FakeDockerClient) AssertCalls(calls []string) (err error) { return } -func (f *FakeDockerClient) AssertCallDetails(calls ...calledDetail) (err error) { +func (f *FakeDockerClient) AssertCallDetails(calls ...CalledDetail) (err error) { f.Lock() defer f.Unlock() @@ -390,7 +390,7 @@ func (f *FakeDockerClient) popError(op string) error { func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "list"}) + f.appendCalled(CalledDetail{name: "list"}) err := f.popError("list") containerList := append([]dockertypes.Container{}, f.RunningContainerList...) if options.All { @@ -470,7 +470,7 @@ func toDockerContainerStatus(state string) string { func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "inspect_container"}) + f.appendCalled(CalledDetail{name: "inspect_container"}) err := f.popError("inspect_container") if container, ok := f.ContainerMap[id]; ok { return container, err @@ -487,7 +487,7 @@ func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJS func (f *FakeDockerClient) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "inspect_container_withsize"}) + f.appendCalled(CalledDetail{name: "inspect_container_withsize"}) err := f.popError("inspect_container_withsize") if container, ok := f.ContainerMap[id]; ok { return container, err @@ -504,7 +504,7 @@ func (f *FakeDockerClient) InspectContainerWithSize(id string) (*dockertypes.Con func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageInspect, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "inspect_image"}) + f.appendCalled(CalledDetail{name: "inspect_image"}) if err := f.popError("inspect_image"); err != nil { return nil, err } @@ -519,7 +519,7 @@ func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageIns func (f *FakeDockerClient) InspectImageByID(name string) (*dockertypes.ImageInspect, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "inspect_image"}) + f.appendCalled(CalledDetail{name: "inspect_image"}) if err := f.popError("inspect_image"); err != nil { return nil, err } @@ -555,7 +555,7 @@ func GetFakeContainerID(name string) string { func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "create"}) + f.appendCalled(CalledDetail{name: 
"create"}) if err := f.popError("create"); err != nil { return nil, err } @@ -581,7 +581,7 @@ func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) func (f *FakeDockerClient) StartContainer(id string) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "start"}) + f.appendCalled(CalledDetail{name: "start"}) if err := f.popError("start"); err != nil { return err } @@ -619,7 +619,7 @@ func (f *FakeDockerClient) StartContainer(id string) error { func (f *FakeDockerClient) StopContainer(id string, timeout time.Duration) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "stop"}) + f.appendCalled(CalledDetail{name: "stop"}) if err := f.popError("stop"); err != nil { return err } @@ -657,7 +657,7 @@ func (f *FakeDockerClient) StopContainer(id string, timeout time.Duration) error func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "remove"}) + f.appendCalled(CalledDetail{name: "remove"}) err := f.popError("remove") if err != nil { return err @@ -693,7 +693,7 @@ func (f *FakeDockerClient) UpdateContainerResources(id string, updateConfig dock func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "logs"}) + f.appendCalled(CalledDetail{name: "logs"}) return f.popError("logs") } @@ -710,7 +710,7 @@ func (f *FakeDockerClient) isAuthorizedForImage(image string, auth dockertypes.A func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "pull"}) + f.appendCalled(CalledDetail{name: "pull"}) err := f.popError("pull") if err == nil { if !f.isAuthorizedForImage(image, auth) { @@ -742,21 +742,21 @@ func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (* f.Lock() defer f.Unlock() f.execCmd = opts.Cmd - f.appendCalled(calledDetail{name: "create_exec"}) + f.appendCalled(CalledDetail{name: "create_exec"}) return &dockertypes.IDResponse{ID: "12345678"}, nil } func (f *FakeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "start_exec"}) + f.appendCalled(CalledDetail{name: "start_exec"}) return nil } func (f *FakeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "attach"}) + f.appendCalled(CalledDetail{name: "attach"}) return nil } @@ -767,7 +767,7 @@ func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecIns func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "list_images"}) + f.appendCalled(CalledDetail{name: "list_images"}) err := f.popError("list_images") return f.Images, err } @@ -775,7 +775,7 @@ func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dock func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "remove_image", arguments: []interface{}{image, opts}}) + f.appendCalled(CalledDetail{name: "remove_image", arguments: 
[]interface{}{image, opts}}) err := f.popError("remove_image") if err == nil { for i := range f.Images { @@ -833,14 +833,14 @@ func (f *FakeDockerClient) updateContainerStatus(id, status string) { func (f *FakeDockerClient) ResizeExecTTY(id string, height, width uint) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "resize_exec"}) + f.appendCalled(CalledDetail{name: "resize_exec"}) return nil } func (f *FakeDockerClient) ResizeContainerTTY(id string, height, width uint) error { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "resize_container"}) + f.appendCalled(CalledDetail{name: "resize_container"}) return nil } @@ -884,7 +884,7 @@ func dockerTimestampToString(t time.Time) string { func (f *FakeDockerClient) ImageHistory(id string) ([]dockerimagetypes.HistoryResponseItem, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "image_history"}) + f.appendCalled(CalledDetail{name: "image_history"}) history := f.ImageHistoryMap[id] return history, nil } @@ -916,6 +916,6 @@ func (f *FakeDockerPuller) GetImageRef(image string) (string, error) { func (f *FakeDockerClient) GetContainerStats(id string) (*dockertypes.StatsJSON, error) { f.Lock() defer f.Unlock() - f.appendCalled(calledDetail{name: "getContainerStats"}) + f.appendCalled(CalledDetail{name: "getContainerStats"}) return nil, fmt.Errorf("not implemented") } diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index c2657d9b6a..28bf4b1c21 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -165,7 +165,7 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // container stats caStats, caFound := caInfos[containerID] if !caFound { - klog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) + klog.V(5).Infof("Unable to find cadvisor stats for %q", containerID) } else { p.addCadvisorContainerStats(cs, &caStats) } diff --git a/pkg/master/client_ca_hook_test.go b/pkg/master/client_ca_hook_test.go index 1d88dbe0c2..69baf28266 100644 --- a/pkg/master/client_ca_hook_test.go +++ b/pkg/master/client_ca_hook_test.go @@ -215,7 +215,7 @@ func TestWriteClientCAs(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { client := fake.NewSimpleClientset(test.preexistingObjs...) 
- test.hook.tryToWriteClientCAs(client.Core()) + test.hook.tryToWriteClientCAs(client.CoreV1()) actualConfigMaps, updated := getFinalConfigMaps(client) if !reflect.DeepEqual(test.expectedConfigMaps, actualConfigMaps) { diff --git a/pkg/master/controller_test.go b/pkg/master/controller_test.go index cfab7074ca..682baa0079 100644 --- a/pkg/master/controller_test.go +++ b/pkg/master/controller_test.go @@ -392,7 +392,7 @@ func TestReconcileEndpoints(t *testing.T) { if test.endpoints != nil { fakeClient = fake.NewSimpleClientset(test.endpoints) } - reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.Core()) + reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.CoreV1()) err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) @@ -510,7 +510,7 @@ func TestReconcileEndpoints(t *testing.T) { if test.endpoints != nil { fakeClient = fake.NewSimpleClientset(test.endpoints) } - reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.Core()) + reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.CoreV1()) err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) @@ -593,7 +593,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) { for _, test := range create_tests { master := Controller{} fakeClient := fake.NewSimpleClientset() - master.ServiceClient = fakeClient.Core() + master.ServiceClient = fakeClient.CoreV1() master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) creates := []core.CreateAction{} for _, action := range fakeClient.Actions() { @@ -875,7 +875,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) { for _, test := range reconcile_tests { master := Controller{} fakeClient := fake.NewSimpleClientset(test.service) - master.ServiceClient = fakeClient.Core() + master.ServiceClient = fakeClient.CoreV1() err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, true) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) @@ -934,7 +934,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) { for _, test := range non_reconcile_tests { master := Controller{} fakeClient := fake.NewSimpleClientset(test.service) - master.ServiceClient = fakeClient.Core() + master.ServiceClient = fakeClient.CoreV1() err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 63c9a9ea19..efac9d45f0 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -237,7 +237,7 @@ func makeNodeList(nodes []string, nodeResources apiv1.NodeResources) *apiv1.Node func TestGetNodeAddresses(t *testing.T) { assert := assert.New(t) - fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).Core().Nodes() + fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).CoreV1().Nodes() addressProvider := 
nodeAddressProvider{fakeNodeClient} // Fail case (no addresses associated with nodes) @@ -261,7 +261,7 @@ func TestGetNodeAddresses(t *testing.T) { func TestGetNodeAddressesWithOnlySomeExternalIP(t *testing.T) { assert := assert.New(t) - fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2", "node3"}, apiv1.NodeResources{})).Core().Nodes() + fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2", "node3"}, apiv1.NodeResources{})).CoreV1().Nodes() addressProvider := nodeAddressProvider{fakeNodeClient} // Pass case with 1 External type IP (index == 1) and nodes (indexes 0 & 2) have no External IP. diff --git a/pkg/registry/apps/daemonset/strategy.go b/pkg/registry/apps/daemonset/strategy.go index cb621d695a..02d80a61a7 100644 --- a/pkg/registry/apps/daemonset/strategy.go +++ b/pkg/registry/apps/daemonset/strategy.go @@ -166,6 +166,7 @@ type daemonSetStatusStrategy struct { daemonSetStrategy } +// StatusStrategy is the default logic invoked when updating object status. var StatusStrategy = daemonSetStatusStrategy{Strategy} func (daemonSetStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { diff --git a/pkg/registry/apps/deployment/strategy.go b/pkg/registry/apps/deployment/strategy.go index 6477a05c90..bca13ba27b 100644 --- a/pkg/registry/apps/deployment/strategy.go +++ b/pkg/registry/apps/deployment/strategy.go @@ -145,6 +145,7 @@ type deploymentStatusStrategy struct { deploymentStrategy } +// StatusStrategy is the default logic invoked when updating object status. var StatusStrategy = deploymentStatusStrategy{Strategy} // PrepareForUpdate clears fields that are not allowed to be set by end users on update of status diff --git a/pkg/registry/apps/statefulset/strategy.go b/pkg/registry/apps/statefulset/strategy.go index 2925702fe4..2b259dc87e 100644 --- a/pkg/registry/apps/statefulset/strategy.go +++ b/pkg/registry/apps/statefulset/strategy.go @@ -130,6 +130,7 @@ type statefulSetStatusStrategy struct { statefulSetStrategy } +// StatusStrategy is the default logic invoked when updating object status. var StatusStrategy = statefulSetStatusStrategy{Strategy} // PrepareForUpdate clears fields that are not allowed to be set by end users on update of status diff --git a/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go b/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go index 12dbad2dd0..f26088f22a 100644 --- a/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go +++ b/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go @@ -86,6 +86,7 @@ type autoscalerStatusStrategy struct { autoscalerStrategy } +// StatusStrategy is the default logic invoked when updating object status. var StatusStrategy = autoscalerStatusStrategy{Strategy} func (autoscalerStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { diff --git a/pkg/registry/batch/cronjob/strategy.go b/pkg/registry/batch/cronjob/strategy.go index a261bc548d..15780e795f 100644 --- a/pkg/registry/batch/cronjob/strategy.go +++ b/pkg/registry/batch/cronjob/strategy.go @@ -118,6 +118,7 @@ type cronJobStatusStrategy struct { cronJobStrategy } +// StatusStrategy is the default logic invoked when updating object status. 
var StatusStrategy = cronJobStatusStrategy{Strategy} func (cronJobStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { diff --git a/pkg/registry/core/resourcequota/strategy.go b/pkg/registry/core/resourcequota/strategy.go index 4e62ca4ec4..da259dfbff 100644 --- a/pkg/registry/core/resourcequota/strategy.go +++ b/pkg/registry/core/resourcequota/strategy.go @@ -87,6 +87,7 @@ type resourcequotaStatusStrategy struct { resourcequotaStrategy } +// StatusStrategy is the default logic invoked when updating object status. var StatusStrategy = resourcequotaStatusStrategy{Strategy} func (resourcequotaStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { diff --git a/pkg/registry/core/service/ipallocator/controller/repair_test.go b/pkg/registry/core/service/ipallocator/controller/repair_test.go index af2e59b22f..1410850e68 100644 --- a/pkg/registry/core/service/ipallocator/controller/repair_test.go +++ b/pkg/registry/core/service/ipallocator/controller/repair_test.go @@ -56,7 +56,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "192.168.1.0/24"}, } _, cidr, _ := net.ParseCIDR(ipregistry.item.Range) - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) if err := r.RunOnce(); err != nil { t.Fatal(err) @@ -69,7 +69,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "192.168.1.0/24"}, updateErr: fmt.Errorf("test error"), } - r = NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r = NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) if err := r.RunOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } @@ -97,7 +97,7 @@ func TestRepairLeak(t *testing.T) { }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) // Run through the "leak detection holdoff" loops. 
for i := 0; i < (numRepairsBeforeLeakCleanup - 1); i++ { if err := r.RunOnce(); err != nil { @@ -170,7 +170,7 @@ func TestRepairWithExisting(t *testing.T) { Data: dst.Data, }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) if err := r.RunOnce(); err != nil { t.Fatal(err) } diff --git a/pkg/registry/core/service/portallocator/controller/repair_test.go b/pkg/registry/core/service/portallocator/controller/repair_test.go index 0df94f898d..5043685e33 100644 --- a/pkg/registry/core/service/portallocator/controller/repair_test.go +++ b/pkg/registry/core/service/portallocator/controller/repair_test.go @@ -56,7 +56,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "100-200"}, } pr, _ := net.ParsePortRange(registry.item.Range) - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) if err := r.RunOnce(); err != nil { t.Fatal(err) @@ -69,7 +69,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "100-200"}, updateErr: fmt.Errorf("test error"), } - r = NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r = NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) if err := r.RunOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } @@ -97,7 +97,7 @@ func TestRepairLeak(t *testing.T) { }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) // Run through the "leak detection holdoff" loops. for i := 0; i < (numRepairsBeforeLeakCleanup - 1); i++ { if err := r.RunOnce(); err != nil { @@ -182,7 +182,7 @@ func TestRepairWithExisting(t *testing.T) { Data: dst.Data, }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) if err := r.RunOnce(); err != nil { t.Fatal(err) } diff --git a/pkg/registry/policy/poddisruptionbudget/strategy.go b/pkg/registry/policy/poddisruptionbudget/strategy.go index 36e940b6ac..0ee30f3b9d 100644 --- a/pkg/registry/policy/poddisruptionbudget/strategy.go +++ b/pkg/registry/policy/poddisruptionbudget/strategy.go @@ -98,6 +98,7 @@ type podDisruptionBudgetStatusStrategy struct { podDisruptionBudgetStrategy } +// StatusStrategy is the default logic invoked when updating object status. var StatusStrategy = podDisruptionBudgetStatusStrategy{Strategy} // PrepareForUpdate clears fields that are not allowed to be set by end users on update of status diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index 97e98526e4..1a3a087766 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -383,7 +383,8 @@ func (p *PriorityQueue) clearPodBackoff(pod *v1.Pod) { p.podBackoff.ClearPodBackoff(nsNameForPod(pod)) } -// isPodBackingOff returns whether a pod is currently undergoing backoff in the podBackoff structure +// isPodBackingOff returns true if a pod is still waiting for its backoff timer. +// If this returns true, the pod should not be re-tried. 
func (p *PriorityQueue) isPodBackingOff(pod *v1.Pod) bool { boTime, exists := p.podBackoff.GetBackoffTime(nsNameForPod(pod)) if !exists { @@ -411,11 +412,10 @@ func (p *PriorityQueue) SchedulingCycle() int64 { return p.schedulingCycle } -// AddUnschedulableIfNotPresent does nothing if the pod is present in any -// queue. If pod is unschedulable, it adds pod to unschedulable queue if -// p.moveRequestCycle > podSchedulingCycle or to backoff queue if p.moveRequestCycle -// <= podSchedulingCycle but pod is subject to backoff. In other cases, it adds pod to -// active queue. +// AddUnschedulableIfNotPresent inserts a pod that cannot be scheduled into +// the queue, unless it is already in the queue. Normally, PriorityQueue puts +// unschedulable pods in `unschedulableQ`. But if there has been a recent move +// request, then the pod is put in `podBackoffQ`. func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingCycle int64) error { p.lock.Lock() defer p.lock.Unlock() @@ -430,30 +430,26 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingC if _, exists, _ := p.podBackoffQ.Get(pInfo); exists { return fmt.Errorf("pod is already present in the backoffQ") } - if podSchedulingCycle > p.moveRequestCycle && isPodUnschedulable(pod) { - p.backoffPod(pod) - p.unschedulableQ.addOrUpdate(pInfo) - p.nominatedPods.add(pod, "") - return nil - } - // If a move request has been received and the pod is subject to backoff, move it to the BackoffQ. - if p.isPodBackingOff(pod) && isPodUnschedulable(pod) { - err := p.podBackoffQ.Add(pInfo) - if err != nil { - klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) - } else { - p.nominatedPods.add(pod, "") + // Every unschedulable pod is subject to backoff timers. + p.backoffPod(pod) + + // If a move request has been received, move it to the BackoffQ, otherwise move + // it to unschedulableQ. + if p.moveRequestCycle >= podSchedulingCycle { + if err := p.podBackoffQ.Add(pInfo); err != nil { + // TODO: Delete this klog call and log returned errors at the call site. + err = fmt.Errorf("error adding pod %v to the backoff queue: %v", pod.Name, err) + klog.Error(err) + return err } - return err + } else { + p.unschedulableQ.addOrUpdate(pInfo) } - err := p.activeQ.Add(pInfo) - if err == nil { - p.nominatedPods.add(pod, "") - p.cond.Broadcast() - } - return err + p.nominatedPods.add(pod, "") + return nil + } // flushBackoffQCompleted Moves all pods from backoffQ which have completed backoff in to activeQ diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go index 9f7599c86f..c33f854772 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue_test.go +++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go @@ -184,16 +184,14 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { q := NewPriorityQueue(nil) q.Add(&highPriNominatedPod) q.AddUnschedulableIfNotPresent(&highPriNominatedPod, q.SchedulingCycle()) // Must not add anything. - q.AddUnschedulableIfNotPresent(&medPriorityPod, q.SchedulingCycle()) // This should go to activeQ. 
q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle()) expectedNominatedPods := &nominatedPodMap{ nominatedPodToNode: map[types.UID]string{ - medPriorityPod.UID: "node1", unschedulablePod.UID: "node1", highPriNominatedPod.UID: "node1", }, nominatedPods: map[string][]*v1.Pod{ - "node1": {&highPriNominatedPod, &medPriorityPod, &unschedulablePod}, + "node1": {&highPriNominatedPod, &unschedulablePod}, }, } if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) { @@ -202,9 +200,6 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { if p, err := q.Pop(); err != nil || p != &highPriNominatedPod { t.Errorf("Expected: %v after Pop, but got: %v", highPriNominatedPod.Name, p.Name) } - if p, err := q.Pop(); err != nil || p != &medPriorityPod { - t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name) - } if len(q.nominatedPods.nominatedPods) != 1 { t.Errorf("Expected nomindatePods to have one element: %v", q.nominatedPods) } @@ -213,11 +208,11 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { } } -// TestPriorityQueue_AddUnschedulableIfNotPresent_Async tests scenario when +// TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff tests scenario when // AddUnschedulableIfNotPresent is called asynchronously pods in and before // current scheduling cycle will be put back to activeQueue if we were trying // to schedule them when we received move request. -func TestPriorityQueue_AddUnschedulableIfNotPresent_Async(t *testing.T) { +func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) { q := NewPriorityQueue(nil) totalNum := 10 expectedPods := make([]v1.Pod, 0, totalNum) @@ -248,10 +243,14 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent_Async(t *testing.T) { // move all pods to active queue when we were trying to schedule them q.MoveAllToActiveQueue() - moveReqChan := make(chan struct{}) - var wg sync.WaitGroup - wg.Add(totalNum - 1) - // mark pods[1] ~ pods[totalNum-1] as unschedulable, fire goroutines to add them back later + oldCycle := q.SchedulingCycle() + + firstPod, _ := q.Pop() + if !reflect.DeepEqual(&expectedPods[0], firstPod) { + t.Errorf("Unexpected pod. Expected: %v, got: %v", &expectedPods[0], firstPod) + } + + // mark pods[1] ~ pods[totalNum-1] as unschedulable and add them back for i := 1; i < totalNum; i++ { unschedulablePod := expectedPods[i].DeepCopy() unschedulablePod.Status = v1.PodStatus{ @@ -263,24 +262,15 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent_Async(t *testing.T) { }, }, } - cycle := q.SchedulingCycle() - go func() { - <-moveReqChan - q.AddUnschedulableIfNotPresent(unschedulablePod, cycle) - wg.Done() - }() + + q.AddUnschedulableIfNotPresent(unschedulablePod, oldCycle) } - firstPod, _ := q.Pop() - if !reflect.DeepEqual(&expectedPods[0], firstPod) { - t.Errorf("Unexpected pod. Expected: %v, got: %v", &expectedPods[0], firstPod) - } - // close moveReqChan here to make sure q.AddUnschedulableIfNotPresent is called after another pod is popped - close(moveReqChan) - wg.Wait() - // all other pods should be in active queue again + + // Since there was a move request at the same cycle as "oldCycle", these pods + // should be in the backoff queue. 
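Before the verification loop below, it helps to see the routing rule the rewritten AddUnschedulableIfNotPresent applies: every unschedulable pod gets a backoff entry, and the only remaining decision is which queue receives it. A distilled sketch of just that rule (illustrative; the real method also rejects duplicates and records nominated pods).

package main

import "fmt"

// targetQueue mirrors the new decision in AddUnschedulableIfNotPresent: a
// move request at or after the pod's scheduling cycle sends the pod to the
// backoff queue, otherwise it waits in the unschedulable queue.
func targetQueue(moveRequestCycle, podSchedulingCycle int64) string {
	if moveRequestCycle >= podSchedulingCycle {
		return "podBackoffQ"
	}
	return "unschedulableQ"
}

func main() {
	fmt.Println(targetQueue(7, 7)) // move request in the same cycle: podBackoffQ
	fmt.Println(targetQueue(3, 7)) // no move request since scheduling began: unschedulableQ
}
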
for i := 1; i < totalNum; i++ { - if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&expectedPods[i])); !exists { - t.Errorf("Expected %v to be added to activeQ.", expectedPods[i].Name) + if _, exists, _ := q.podBackoffQ.Get(newPodInfoNoTimestamp(&expectedPods[i])); !exists { + t.Errorf("Expected %v to be added to podBackoffQ.", expectedPods[i].Name) } } } diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go index 14f0fc67f6..d900acb6e6 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go @@ -605,7 +605,7 @@ func TestInstallCSIDriverExistingAnnotation(t *testing.T) { } // Assert - nodeInfo, err := csiClient.Csi().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) + nodeInfo, err := csiClient.CsiV1alpha1().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) if err != nil { t.Errorf("error getting CSINodeInfo: %v", err) continue @@ -1018,7 +1018,7 @@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t } /* CSINodeInfo validation */ - nodeInfo, err := csiClient.Csi().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) + nodeInfo, err := csiClient.CsiV1alpha1().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) if err != nil { t.Errorf("error getting CSINodeInfo: %v", err) continue diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 47a10cf6f9..d357d0aec9 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -153,7 +153,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu if kubeClient == nil { return nil, fmt.Errorf("failed to get kube client to initialize mounter") } - ep, err := kubeClient.Core().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) + ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) if err != nil { klog.Errorf("failed to get endpoint %s: %v", epName, err) diff --git a/pkg/volume/iscsi/BUILD b/pkg/volume/iscsi/BUILD index d076457c16..41381d0bb4 100644 --- a/pkg/volume/iscsi/BUILD +++ b/pkg/volume/iscsi/BUILD @@ -27,6 +27,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/keymutex:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", ], diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index 9e04ab2b5f..b63a4b3b5b 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" + utilexec "k8s.io/utils/exec" ) const ( @@ -51,6 +52,10 @@ const ( // How many seconds to wait for a multipath device if at least two paths are available. 
multipathDeviceTimeout = 10 + + // 'iscsiadm' error code stating that a session is logged in + // See https://github.com/open-iscsi/open-iscsi/blob/7d121d12ad6ba7783308c25ffd338a9fa0cc402b/include/iscsi_err.h#L37-L38 + iscsiadmErrorSessExists = 15 ) var ( @@ -872,8 +877,13 @@ func cloneIface(b iscsiDiskMounter, newIface string) error { // create new iface out, err = b.exec.Run("iscsiadm", "-m", "iface", "-I", newIface, "-o", "new") if err != nil { - lastErr = fmt.Errorf("iscsi: failed to create new iface: %s (%v)", string(out), err) - return lastErr + exit, ok := err.(utilexec.ExitError) + if ok && exit.ExitStatus() == iscsiadmErrorSessExists { + klog.Infof("iscsi: there is a session already logged in with iface %s", newIface) + } else { + lastErr = fmt.Errorf("iscsi: failed to create new iface: %s (%v)", string(out), err) + return lastErr + } } // update new iface records for key, val := range params { diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 3678ebd9c1..d62dcbc174 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -472,7 +472,7 @@ func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/pkg/volume/storageos/storageos_test.go b/pkg/volume/storageos/storageos_test.go index 2b448bc916..9acd857b80 100644 --- a/pkg/volume/storageos/storageos_test.go +++ b/pkg/volume/storageos/storageos_test.go @@ -171,7 +171,7 @@ func TestPlugin(t *testing.T) { client := fake.NewSimpleClientset() - client.Core().Secrets("default").Create(&v1.Secret{ + client.CoreV1().Secrets("default").Create(&v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: "default", diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 9cbea4f3d7..92c5435553 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -1535,6 +1535,9 @@ func isDeviceOpened(deviceToDetach AttachedVolume, mounter mount.Interface) (boo // TODO(dyzz): need to also add logic to check CSINodeInfo for Kubelet migration status func useCSIPlugin(vpm *volume.VolumePluginMgr, spec *volume.Spec) bool { + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { + return false + } if csilib.IsPVMigratable(spec.PersistentVolume) || csilib.IsInlineMigratable(spec.Volume) { migratable, err := vpm.IsPluginMigratableBySpec(spec) if err == nil && migratable { diff --git a/plugin/pkg/admission/namespace/autoprovision/admission.go b/plugin/pkg/admission/namespace/autoprovision/admission.go index 2c95f49e89..54e021df77 100644 --- a/plugin/pkg/admission/namespace/autoprovision/admission.go +++ b/plugin/pkg/admission/namespace/autoprovision/admission.go @@ -89,7 +89,7 @@ func (p *Provision) Admit(a admission.Attributes, o admission.ObjectInterfaces) Status: corev1.NamespaceStatus{}, } - _, err = p.client.Core().Namespaces().Create(namespace) + _, err = p.client.CoreV1().Namespaces().Create(namespace) if err != nil && !errors.IsAlreadyExists(err) { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/exists/admission.go 
b/plugin/pkg/admission/namespace/exists/admission.go index 703081747d..e14ead89c6 100644 --- a/plugin/pkg/admission/namespace/exists/admission.go +++ b/plugin/pkg/admission/namespace/exists/admission.go @@ -75,7 +75,7 @@ func (e *Exists) Validate(a admission.Attributes, o admission.ObjectInterfaces) } // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - _, err = e.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) + _, err = e.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return err diff --git a/plugin/pkg/admission/podnodeselector/admission.go b/plugin/pkg/admission/podnodeselector/admission.go index 94fe90ec7d..103397f757 100644 --- a/plugin/pkg/admission/podnodeselector/admission.go +++ b/plugin/pkg/admission/podnodeselector/admission.go @@ -216,7 +216,7 @@ func (p *podNodeSelector) ValidateInitialization() error { } func (p *podNodeSelector) defaultGetNamespace(name string) (*corev1.Namespace, error) { - namespace, err := p.client.Core().Namespaces().Get(name, metav1.GetOptions{}) + namespace, err := p.client.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("namespace %s does not exist", name) } diff --git a/plugin/pkg/admission/resourcequota/resource_access.go b/plugin/pkg/admission/resourcequota/resource_access.go index f703d478b3..dfdde7e753 100644 --- a/plugin/pkg/admission/resourcequota/resource_access.go +++ b/plugin/pkg/admission/resourcequota/resource_access.go @@ -125,7 +125,7 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, err // If there is already in-flight List() for a given namespace, we should wait until // it is finished and cache is updated instead of doing the same, also to avoid // throttling - see #22422 for details. 
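The hunk just below is one instance of the tree-wide migration from the deprecated unversioned Core() accessor to the version-qualified CoreV1(). A minimal sketch of the new call shape against the fake clientset, assuming client-go of this vintage (methods did not yet take a context.Context).

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// The fake clientset satisfies kubernetes.Interface, so the typed
	// CoreV1() accessor behaves the same as against a real apiserver.
	client := fake.NewSimpleClientset()

	// Previously spelled client.Core().ResourceQuotas("demo").List(...).
	quotas, err := client.CoreV1().ResourceQuotas("demo").List(metav1.ListOptions{})
	fmt.Println(len(quotas.Items), err) // 0 <nil>: the fake store starts empty
}
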
- liveList, err := e.client.Core().ResourceQuotas(namespace).List(metav1.ListOptions{}) + liveList, err := e.client.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index eb9418805b..17496491b5 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -304,7 +304,7 @@ func (s *serviceAccount) getServiceAccount(namespace string, name string) (*core if i != 0 { time.Sleep(retryInterval) } - serviceAccount, err := s.client.Core().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + serviceAccount, err := s.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) if err == nil { return serviceAccount, nil } diff --git a/staging/publishing/import-restrictions.yaml b/staging/publishing/import-restrictions.yaml index 135d86454b..05eef5ca12 100644 --- a/staging/publishing/import-restrictions.yaml +++ b/staging/publishing/import-restrictions.yaml @@ -27,6 +27,7 @@ - k8s.io/api/core/v1 - k8s.io/cli-runtime/pkg/genericclioptions/printers - k8s.io/cli-runtime/pkg/genericclioptions/resource + - k8s.io/cli-runtime/pkg/kustomize - baseImportPath: "./vendor/k8s.io/apimachinery/" allowedImports: diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 2ca3fa6524..9eddc3c2ac 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -462,6 +462,11 @@ func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) i += copy(dAtA[i:], *m.SideEffects) } + if m.TimeoutSeconds != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } return i, nil } @@ -647,6 +652,9 @@ func (m *Webhook) Size() (n int) { l = len(*m.SideEffects) n += 1 + l + sovGenerated(uint64(l)) } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } return n } @@ -771,6 +779,7 @@ func (this *Webhook) String() string { `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `}`, }, "") return s @@ -1835,6 +1844,26 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { s := SideEffectClass(dAtA[iNdEx:postIndex]) m.SideEffects = &s iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2110,62 +2139,64 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 906 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x37, 0x29, 0x49, 0x26, 0x89, 
0x76, 0x3b, 0x80, 0x14, 0xaa, 0x95, 0x1d, 0xe5, 0x80, - 0x22, 0xa1, 0xb5, 0x49, 0x41, 0x08, 0x21, 0x10, 0xaa, 0x0b, 0x0b, 0x95, 0xba, 0xbb, 0x61, 0x0a, - 0xbb, 0x12, 0xe2, 0xc0, 0xc4, 0x79, 0x49, 0x86, 0xf8, 0x97, 0x66, 0xc6, 0x59, 0x7a, 0x43, 0xe2, - 0x1f, 0x40, 0x42, 0xfc, 0x0d, 0xfc, 0x15, 0xdc, 0x7b, 0xdc, 0x0b, 0x62, 0x4f, 0x16, 0x35, 0x67, - 0x0e, 0x5c, 0x7b, 0x42, 0x63, 0x3b, 0x71, 0xd2, 0x6c, 0xbb, 0xe9, 0x85, 0x03, 0x37, 0xcf, 0xf7, - 0xe6, 0xfb, 0xde, 0xfb, 0x9e, 0xdf, 0x1b, 0xf4, 0xc5, 0xec, 0x7d, 0x61, 0xb2, 0xc0, 0x9a, 0x45, - 0x43, 0xe0, 0x3e, 0x48, 0x10, 0xd6, 0x1c, 0xfc, 0x51, 0xc0, 0xad, 0x3c, 0x40, 0x43, 0x66, 0xd1, - 0x91, 0xc7, 0x84, 0x60, 0x81, 0xcf, 0x61, 0xc2, 0x84, 0xe4, 0x54, 0xb2, 0xc0, 0xb7, 0xe6, 0xfd, - 0x21, 0x48, 0xda, 0xb7, 0x26, 0xe0, 0x03, 0xa7, 0x12, 0x46, 0x66, 0xc8, 0x03, 0x19, 0xe0, 0x5e, - 0xc6, 0x34, 0x69, 0xc8, 0xcc, 0x17, 0x32, 0xcd, 0x9c, 0xb9, 0x77, 0x6f, 0xc2, 0xe4, 0x34, 0x1a, - 0x9a, 0x4e, 0xe0, 0x59, 0x93, 0x60, 0x12, 0x58, 0xa9, 0xc0, 0x30, 0x1a, 0xa7, 0xa7, 0xf4, 0x90, - 0x7e, 0x65, 0xc2, 0x7b, 0xef, 0x16, 0x25, 0x79, 0xd4, 0x99, 0x32, 0x1f, 0xf8, 0xa9, 0x15, 0xce, - 0x26, 0x0a, 0x10, 0x96, 0x07, 0x92, 0x5a, 0xf3, 0x8d, 0x72, 0xf6, 0xac, 0xab, 0x58, 0x3c, 0xf2, - 0x25, 0xf3, 0x60, 0x83, 0xf0, 0xde, 0xcb, 0x08, 0xc2, 0x99, 0x82, 0x47, 0x2f, 0xf3, 0xba, 0xbf, - 0x6b, 0xe8, 0xee, 0x83, 0x48, 0x52, 0xc9, 0xfc, 0xc9, 0x13, 0x18, 0x4e, 0x83, 0x60, 0x76, 0x18, - 0xf8, 0x63, 0x36, 0x89, 0x32, 0xdb, 0xf8, 0x5b, 0x54, 0x53, 0x45, 0x8e, 0xa8, 0xa4, 0x6d, 0xad, - 0xa3, 0xf5, 0x1a, 0xfb, 0x6f, 0x9b, 0x45, 0xaf, 0x96, 0xb9, 0xcc, 0x70, 0x36, 0x51, 0x80, 0x30, - 0xd5, 0x6d, 0x73, 0xde, 0x37, 0x1f, 0x0d, 0xbf, 0x03, 0x47, 0x3e, 0x00, 0x49, 0x6d, 0x7c, 0x16, - 0x1b, 0xa5, 0x24, 0x36, 0x50, 0x81, 0x91, 0xa5, 0x2a, 0x3e, 0x41, 0xb5, 0x3c, 0xb3, 0x68, 0xdf, - 0xea, 0x94, 0x7b, 0x8d, 0xfd, 0xbe, 0xb9, 0xed, 0xdf, 0x30, 0x73, 0xa6, 0x5d, 0x51, 0x29, 0x48, - 0xed, 0x69, 0x2e, 0xd4, 0xfd, 0x5b, 0x43, 0x9d, 0xeb, 0x7c, 0x1d, 0x33, 0x21, 0xf1, 0x37, 0x1b, - 0xde, 0xcc, 0xed, 0xbc, 0x29, 0x76, 0xea, 0xec, 0x4e, 0xee, 0xac, 0xb6, 0x40, 0x56, 0x7c, 0xcd, - 0xd0, 0x0e, 0x93, 0xe0, 0x2d, 0x4c, 0xdd, 0xdf, 0xde, 0xd4, 0x75, 0x85, 0xdb, 0xad, 0x3c, 0xe5, - 0xce, 0x91, 0x12, 0x27, 0x59, 0x8e, 0xee, 0xcf, 0x1a, 0xaa, 0x90, 0xc8, 0x05, 0xfc, 0x16, 0xaa, - 0xd3, 0x90, 0x7d, 0xc6, 0x83, 0x28, 0x14, 0x6d, 0xad, 0x53, 0xee, 0xd5, 0xed, 0x56, 0x12, 0x1b, - 0xf5, 0x83, 0xc1, 0x51, 0x06, 0x92, 0x22, 0x8e, 0xfb, 0xa8, 0x41, 0x43, 0xf6, 0x18, 0xb8, 0x2a, - 0x25, 0x2b, 0xb4, 0x6e, 0xdf, 0x4e, 0x62, 0xa3, 0x71, 0x30, 0x38, 0x5a, 0xc0, 0x64, 0xf5, 0x8e, - 0xd2, 0xe7, 0x20, 0x82, 0x88, 0x3b, 0x20, 0xda, 0xe5, 0x42, 0x9f, 0x2c, 0x40, 0x52, 0xc4, 0xbb, - 0xbf, 0x6a, 0x08, 0xab, 0xaa, 0x9e, 0x30, 0x39, 0x7d, 0x14, 0x42, 0xe6, 0x40, 0xe0, 0x8f, 0x11, - 0x0a, 0x96, 0xa7, 0xbc, 0x48, 0x23, 0x9d, 0x8f, 0x25, 0x7a, 0x11, 0x1b, 0xad, 0xe5, 0xe9, 0xcb, - 0xd3, 0x10, 0xc8, 0x0a, 0x05, 0x0f, 0x50, 0x85, 0x47, 0x2e, 0xb4, 0x6f, 0x6d, 0xfc, 0xb4, 0x97, - 0x74, 0x56, 0x15, 0x63, 0x37, 0xf3, 0x0e, 0xa6, 0x0d, 0x23, 0xa9, 0x52, 0xf7, 0x47, 0x0d, 0xdd, - 0x39, 0x01, 0x3e, 0x67, 0x0e, 0x10, 0x18, 0x03, 0x07, 0xdf, 0x01, 0x6c, 0xa1, 0xba, 0x4f, 0x3d, - 0x10, 0x21, 0x75, 0x20, 0x1d, 0x90, 0xba, 0xbd, 0x9b, 0x73, 0xeb, 0x0f, 0x17, 0x01, 0x52, 0xdc, - 0xc1, 0x1d, 0x54, 0x51, 0x87, 0xb4, 0xae, 0x7a, 0x91, 0x47, 0xdd, 0x25, 0x69, 0x04, 0xdf, 0x45, - 0x95, 0x90, 0xca, 0x69, 0xbb, 0x9c, 0xde, 0xa8, 0xa9, 0xe8, 0x80, 0xca, 0x29, 0x49, 0xd1, 0xee, - 0x1f, 0x1a, 0xd2, 0x1f, 0x53, 0x97, 0x8d, 0xfe, 0x77, 0xfb, 0xf8, 
0x8f, 0x86, 0xba, 0xd7, 0x3b, - 0xfb, 0x0f, 0x36, 0xd2, 0x5b, 0xdf, 0xc8, 0xcf, 0xb7, 0xb7, 0x75, 0x7d, 0xe9, 0x57, 0xec, 0xe4, - 0x2f, 0x15, 0x54, 0xcd, 0xaf, 0x2f, 0x27, 0x43, 0xbb, 0x72, 0x32, 0x9e, 0xa2, 0xa6, 0xe3, 0x32, - 0xf0, 0x65, 0x26, 0x9d, 0xcf, 0xf6, 0x47, 0x37, 0x6e, 0xfd, 0xe1, 0x8a, 0x88, 0xfd, 0x5a, 0x9e, - 0xa8, 0xb9, 0x8a, 0x92, 0xb5, 0x44, 0x98, 0xa2, 0x1d, 0xb5, 0x02, 0xd9, 0x36, 0x37, 0xf6, 0x3f, - 0xbc, 0xd9, 0x36, 0xad, 0xaf, 0x76, 0xd1, 0x09, 0x15, 0x13, 0x24, 0x53, 0xc6, 0xc7, 0xa8, 0x35, - 0xa6, 0xcc, 0x8d, 0x38, 0x0c, 0x02, 0x97, 0x39, 0xa7, 0xed, 0x4a, 0xda, 0x86, 0x37, 0x93, 0xd8, - 0x68, 0xdd, 0x5f, 0x0d, 0x5c, 0xc4, 0xc6, 0xee, 0x1a, 0x90, 0xae, 0xfe, 0x3a, 0x19, 0x7f, 0x8f, - 0x76, 0x97, 0x2b, 0x77, 0x02, 0x2e, 0x38, 0x32, 0xe0, 0xed, 0x9d, 0xb4, 0x5d, 0xef, 0x6c, 0x39, - 0x2d, 0x74, 0x08, 0xee, 0x82, 0x6a, 0xbf, 0x9e, 0xc4, 0xc6, 0xee, 0xc3, 0xcb, 0x8a, 0x64, 0x33, - 0x09, 0xfe, 0x04, 0x35, 0x04, 0x1b, 0xc1, 0xa7, 0xe3, 0x31, 0x38, 0x52, 0xb4, 0x5f, 0x49, 0x5d, - 0x74, 0xd5, 0x7b, 0x79, 0x52, 0xc0, 0x17, 0xb1, 0x71, 0xbb, 0x38, 0x1e, 0xba, 0x54, 0x08, 0xb2, - 0x4a, 0xeb, 0xfe, 0xa6, 0xa1, 0x57, 0x5f, 0xf0, 0xb3, 0x30, 0x45, 0x55, 0x91, 0x3d, 0x41, 0xf9, - 0xec, 0x7f, 0xb0, 0xfd, 0xaf, 0xb8, 0xfc, 0x76, 0xd9, 0x8d, 0x24, 0x36, 0xaa, 0x0b, 0x74, 0xa1, - 0x8b, 0x7b, 0xa8, 0xe6, 0x50, 0x3b, 0xf2, 0x47, 0xf9, 0xe3, 0xd9, 0xb4, 0x9b, 0x6a, 0x57, 0x0e, - 0x0f, 0x32, 0x8c, 0x2c, 0xa3, 0xf8, 0x0d, 0x54, 0x8e, 0xb8, 0x9b, 0xbf, 0x53, 0xd5, 0x24, 0x36, - 0xca, 0x5f, 0x91, 0x63, 0xa2, 0x30, 0xfb, 0xde, 0xd9, 0xb9, 0x5e, 0x7a, 0x76, 0xae, 0x97, 0x9e, - 0x9f, 0xeb, 0xa5, 0x1f, 0x12, 0x5d, 0x3b, 0x4b, 0x74, 0xed, 0x59, 0xa2, 0x6b, 0xcf, 0x13, 0x5d, - 0xfb, 0x33, 0xd1, 0xb5, 0x9f, 0xfe, 0xd2, 0x4b, 0x5f, 0x57, 0xf3, 0xd2, 0xfe, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x85, 0x06, 0x8c, 0x7f, 0xae, 0x09, 0x00, 0x00, + // 936 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x55, 0x41, 0x8f, 0xdb, 0x44, + 0x14, 0x5e, 0x37, 0x59, 0x92, 0x4c, 0x92, 0xb6, 0x3b, 0x80, 0x64, 0xaa, 0xca, 0x8e, 0x72, 0x40, + 0x91, 0x50, 0x6d, 0xb2, 0x20, 0x84, 0x2a, 0x10, 0x5a, 0x2f, 0x14, 0x56, 0xda, 0xb6, 0x61, 0x52, + 0x5a, 0x09, 0x71, 0x60, 0xe2, 0xbc, 0x24, 0x43, 0x1c, 0x8f, 0xe5, 0x19, 0xa7, 0xec, 0x0d, 0x89, + 0x3f, 0x80, 0xc4, 0x8f, 0xe0, 0x57, 0x70, 0xdf, 0x63, 0x39, 0x20, 0x7a, 0xb2, 0x58, 0x73, 0xe6, + 0xc0, 0x75, 0x4f, 0x68, 0x6c, 0x27, 0x4e, 0x36, 0xdd, 0x6d, 0x7a, 0xe1, 0xc0, 0x2d, 0xf3, 0xbd, + 0xf7, 0x7d, 0xef, 0x7d, 0xcf, 0xf3, 0x26, 0xe8, 0xcb, 0xe9, 0x87, 0xc2, 0x62, 0xdc, 0x9e, 0x46, + 0x03, 0x08, 0x7d, 0x90, 0x20, 0xec, 0x39, 0xf8, 0x43, 0x1e, 0xda, 0x79, 0x80, 0x06, 0xcc, 0xa6, + 0xc3, 0x19, 0x13, 0x82, 0x71, 0x3f, 0x84, 0x31, 0x13, 0x32, 0xa4, 0x92, 0x71, 0xdf, 0x9e, 0x77, + 0x07, 0x20, 0x69, 0xd7, 0x1e, 0x83, 0x0f, 0x21, 0x95, 0x30, 0xb4, 0x82, 0x90, 0x4b, 0x8e, 0x3b, + 0x19, 0xd3, 0xa2, 0x01, 0xb3, 0x5e, 0xc8, 0xb4, 0x72, 0xe6, 0xad, 0x3b, 0x63, 0x26, 0x27, 0xd1, + 0xc0, 0x72, 0xf9, 0xcc, 0x1e, 0xf3, 0x31, 0xb7, 0x53, 0x81, 0x41, 0x34, 0x4a, 0x4f, 0xe9, 0x21, + 0xfd, 0x95, 0x09, 0xdf, 0x7a, 0xbf, 0x68, 0x69, 0x46, 0xdd, 0x09, 0xf3, 0x21, 0x3c, 0xb1, 0x83, + 0xe9, 0x58, 0x01, 0xc2, 0x9e, 0x81, 0xa4, 0xf6, 0x7c, 0xa3, 0x9d, 0x5b, 0xf6, 0x65, 0xac, 0x30, + 0xf2, 0x25, 0x9b, 0xc1, 0x06, 0xe1, 0x83, 0x97, 0x11, 0x84, 0x3b, 0x81, 0x19, 0xbd, 0xc8, 0x6b, + 0xff, 0xae, 0xa1, 0xdb, 0xf7, 0x23, 0x49, 0x25, 0xf3, 0xc7, 0x4f, 0x60, 0x30, 0xe1, 0x7c, 0x7a, + 0xc8, 0xfd, 0x11, 0x1b, 0x47, 0x99, 0x6d, 0xfc, 0x2d, 0xaa, 0xaa, 0x26, 0x87, 
0x54, 0x52, 0x5d, + 0x6b, 0x69, 0x9d, 0xfa, 0xfe, 0xbb, 0x56, 0x31, 0xab, 0x65, 0x2d, 0x2b, 0x98, 0x8e, 0x15, 0x20, + 0x2c, 0x95, 0x6d, 0xcd, 0xbb, 0xd6, 0xc3, 0xc1, 0x77, 0xe0, 0xca, 0xfb, 0x20, 0xa9, 0x83, 0x4f, + 0x63, 0x73, 0x27, 0x89, 0x4d, 0x54, 0x60, 0x64, 0xa9, 0x8a, 0xfb, 0xa8, 0x9a, 0x57, 0x16, 0xfa, + 0xb5, 0x56, 0xa9, 0x53, 0xdf, 0xef, 0x5a, 0xdb, 0x7e, 0x0d, 0x2b, 0x67, 0x3a, 0x65, 0x55, 0x82, + 0x54, 0x9f, 0xe6, 0x42, 0xed, 0xbf, 0x35, 0xd4, 0xba, 0xca, 0xd7, 0x31, 0x13, 0x12, 0x7f, 0xb3, + 0xe1, 0xcd, 0xda, 0xce, 0x9b, 0x62, 0xa7, 0xce, 0x6e, 0xe6, 0xce, 0xaa, 0x0b, 0x64, 0xc5, 0xd7, + 0x14, 0xed, 0x32, 0x09, 0xb3, 0x85, 0xa9, 0x7b, 0xdb, 0x9b, 0xba, 0xaa, 0x71, 0xa7, 0x99, 0x97, + 0xdc, 0x3d, 0x52, 0xe2, 0x24, 0xab, 0xd1, 0xfe, 0x59, 0x43, 0x65, 0x12, 0x79, 0x80, 0xdf, 0x41, + 0x35, 0x1a, 0xb0, 0xcf, 0x43, 0x1e, 0x05, 0x42, 0xd7, 0x5a, 0xa5, 0x4e, 0xcd, 0x69, 0x26, 0xb1, + 0x59, 0x3b, 0xe8, 0x1d, 0x65, 0x20, 0x29, 0xe2, 0xb8, 0x8b, 0xea, 0x34, 0x60, 0x8f, 0x21, 0x54, + 0xad, 0x64, 0x8d, 0xd6, 0x9c, 0x1b, 0x49, 0x6c, 0xd6, 0x0f, 0x7a, 0x47, 0x0b, 0x98, 0xac, 0xe6, + 0x28, 0xfd, 0x10, 0x04, 0x8f, 0x42, 0x17, 0x84, 0x5e, 0x2a, 0xf4, 0xc9, 0x02, 0x24, 0x45, 0xbc, + 0xfd, 0x8b, 0x86, 0xb0, 0xea, 0xea, 0x09, 0x93, 0x93, 0x87, 0x01, 0x64, 0x0e, 0x04, 0xfe, 0x04, + 0x21, 0xbe, 0x3c, 0xe5, 0x4d, 0x9a, 0xe9, 0xfd, 0x58, 0xa2, 0xe7, 0xb1, 0xd9, 0x5c, 0x9e, 0x1e, + 0x9d, 0x04, 0x40, 0x56, 0x28, 0xb8, 0x87, 0xca, 0x61, 0xe4, 0x81, 0x7e, 0x6d, 0xe3, 0xa3, 0xbd, + 0x64, 0xb2, 0xaa, 0x19, 0xa7, 0x91, 0x4f, 0x30, 0x1d, 0x18, 0x49, 0x95, 0xda, 0x3f, 0x6a, 0xe8, + 0x66, 0x1f, 0xc2, 0x39, 0x73, 0x81, 0xc0, 0x08, 0x42, 0xf0, 0x5d, 0xc0, 0x36, 0xaa, 0xf9, 0x74, + 0x06, 0x22, 0xa0, 0x2e, 0xa4, 0x17, 0xa4, 0xe6, 0xec, 0xe5, 0xdc, 0xda, 0x83, 0x45, 0x80, 0x14, + 0x39, 0xb8, 0x85, 0xca, 0xea, 0x90, 0xf6, 0x55, 0x2b, 0xea, 0xa8, 0x5c, 0x92, 0x46, 0xf0, 0x6d, + 0x54, 0x0e, 0xa8, 0x9c, 0xe8, 0xa5, 0x34, 0xa3, 0xaa, 0xa2, 0x3d, 0x2a, 0x27, 0x24, 0x45, 0xdb, + 0x7f, 0x68, 0xc8, 0x78, 0x4c, 0x3d, 0x36, 0xfc, 0xdf, 0xed, 0xe3, 0x3f, 0x1a, 0x6a, 0x5f, 0xed, + 0xec, 0x3f, 0xd8, 0xc8, 0xd9, 0xfa, 0x46, 0x7e, 0xb1, 0xbd, 0xad, 0xab, 0x5b, 0xbf, 0x64, 0x27, + 0x7f, 0x2b, 0xa3, 0x4a, 0x9e, 0xbe, 0xbc, 0x19, 0xda, 0xa5, 0x37, 0xe3, 0x29, 0x6a, 0xb8, 0x1e, + 0x03, 0x5f, 0x66, 0xd2, 0xf9, 0xdd, 0xfe, 0xf8, 0x95, 0x47, 0x7f, 0xb8, 0x22, 0xe2, 0xbc, 0x91, + 0x17, 0x6a, 0xac, 0xa2, 0x64, 0xad, 0x10, 0xa6, 0x68, 0x57, 0xad, 0x40, 0xb6, 0xcd, 0xf5, 0xfd, + 0x8f, 0x5e, 0x6d, 0x9b, 0xd6, 0x57, 0xbb, 0x98, 0x84, 0x8a, 0x09, 0x92, 0x29, 0xe3, 0x63, 0xd4, + 0x1c, 0x51, 0xe6, 0x45, 0x21, 0xf4, 0xb8, 0xc7, 0xdc, 0x13, 0xbd, 0x9c, 0x8e, 0xe1, 0xed, 0x24, + 0x36, 0x9b, 0xf7, 0x56, 0x03, 0xe7, 0xb1, 0xb9, 0xb7, 0x06, 0xa4, 0xab, 0xbf, 0x4e, 0xc6, 0xdf, + 0xa3, 0xbd, 0xe5, 0xca, 0xf5, 0xc1, 0x03, 0x57, 0xf2, 0x50, 0xdf, 0x4d, 0xc7, 0xf5, 0xde, 0x96, + 0xb7, 0x85, 0x0e, 0xc0, 0x5b, 0x50, 0x9d, 0x37, 0x93, 0xd8, 0xdc, 0x7b, 0x70, 0x51, 0x91, 0x6c, + 0x16, 0xc1, 0x9f, 0xa2, 0xba, 0x60, 0x43, 0xf8, 0x6c, 0x34, 0x02, 0x57, 0x0a, 0xfd, 0xb5, 0xd4, + 0x45, 0x5b, 0xbd, 0x97, 0xfd, 0x02, 0x3e, 0x8f, 0xcd, 0x1b, 0xc5, 0xf1, 0xd0, 0xa3, 0x42, 0x90, + 0x55, 0x1a, 0xbe, 0x8b, 0xae, 0xab, 0xbf, 0x64, 0x1e, 0xc9, 0x3e, 0xb8, 0xdc, 0x1f, 0x0a, 0xbd, + 0xd2, 0xd2, 0x3a, 0xbb, 0x0e, 0x4e, 0x62, 0xf3, 0xfa, 0xa3, 0xb5, 0x08, 0xb9, 0x90, 0xd9, 0xfe, + 0x55, 0x43, 0xaf, 0xbf, 0xe0, 0x43, 0x63, 0x8a, 0x2a, 0x22, 0x7b, 0xbe, 0xf2, 0xbd, 0xb9, 0xbb, + 0xfd, 0x67, 0xbc, 0xf8, 0xee, 0x39, 0xf5, 0x24, 0x36, 0x2b, 0x0b, 0x74, 0xa1, 0x8b, 0x3b, 0xa8, + 0xea, 
0x52, 0x27, 0xf2, 0x87, 0xf9, 0xc3, 0xdb, 0x70, 0x1a, 0x6a, 0xcf, 0x0e, 0x0f, 0x32, 0x8c, + 0x2c, 0xa3, 0xf8, 0x2d, 0x54, 0x8a, 0x42, 0x2f, 0x7f, 0xe3, 0x2a, 0x49, 0x6c, 0x96, 0xbe, 0x22, + 0xc7, 0x44, 0x61, 0xce, 0x9d, 0xd3, 0x33, 0x63, 0xe7, 0xd9, 0x99, 0xb1, 0xf3, 0xfc, 0xcc, 0xd8, + 0xf9, 0x21, 0x31, 0xb4, 0xd3, 0xc4, 0xd0, 0x9e, 0x25, 0x86, 0xf6, 0x3c, 0x31, 0xb4, 0x3f, 0x13, + 0x43, 0xfb, 0xe9, 0x2f, 0x63, 0xe7, 0xeb, 0x4a, 0xde, 0xda, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x85, 0xc4, 0x5b, 0xa6, 0xea, 0x09, 0x00, 0x00, } diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto index 1c40ae530d..a0278cee70 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -217,6 +217,14 @@ message Webhook { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; } // WebhookClientConfig contains the information to make a TLS diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go index 49d94ec0eb..7968372b39 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go @@ -216,6 +216,14 @@ type Webhook struct { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` } // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index e97628aab7..07df6477ea 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -106,6 +106,7 @@ var map_Webhook = map[string]string{ "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefaults to the empty LabelSelector, which matches everything.", "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun. Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Defaults to 30 seconds.", } func (Webhook) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index c6867be122..b955e4b56a 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -257,6 +257,11 @@ func (in *Webhook) DeepCopyInto(out *Webhook) { *out = new(SideEffectClass) **out = **in } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } return } diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go index 92ff158191..f6ffae326d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go @@ -28,8 +28,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface CrV1() crv1.CrV1Interface - // Deprecated: please explicitly pick a version if possible. - Cr() crv1.CrV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -44,12 +42,6 @@ func (c *Clientset) CrV1() crv1.CrV1Interface { return c.crV1 } -// Deprecated: Cr retrieves the default version of CrClient. -// Please explicitly pick a version.
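Together, the generated swagger strings, the proto field, the typed struct field, and the deepcopy above introduce one new knob: Webhook.TimeoutSeconds. A sketch of how a registration might set it, with the hypothetical name "example-webhook"; the pointer distinguishes unset (defaulted server-side to 30 seconds) from an explicit value, and the dispatcher hunks later in this diff apply the value per request via r.Timeout(...):

    package sketch

    import (
    	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    )

    // newWebhook builds a webhook entry that gives the backend five seconds
    // before the call is ignored or the API request fails, per failurePolicy.
    // The accepted range is 1 through 30 seconds.
    func newWebhook() admissionregistrationv1beta1.Webhook {
    	timeout := int32(5)
    	return admissionregistrationv1beta1.Webhook{
    		Name:           "example-webhook", // hypothetical name
    		TimeoutSeconds: &timeout,
    	}
    }
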
-func (c *Clientset) Cr() crv1.CrV1Interface { - return c.crV1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go index 28089890db..152f5186b6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -75,8 +75,3 @@ var _ clientset.Interface = &Clientset{} func (c *Clientset) CrV1() crv1.CrV1Interface { return &fakecrv1.FakeCrV1{Fake: &c.Fake} } - -// Cr retrieves the CrV1Client -func (c *Clientset) Cr() crv1.CrV1Interface { - return &fakecrv1.FakeCrV1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go index 63fcb510ab..aa2dbcbb08 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -28,8 +28,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Apiextensions() apiextensionsv1beta1.ApiextensionsV1beta1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -44,12 +42,6 @@ func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1b return c.apiextensionsV1beta1 } -// Deprecated: Apiextensions retrieves the default version of ApiextensionsClient. -// Please explicitly pick a version. 
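The same deprecation cleanup is applied to the apiextensions clientset here, and the fixtures hunk just below switches DeleteCustomResourceDefinition accordingly. A sketch of the only change callers need, assuming the delete-options signature used elsewhere in this diff:

    package sketch

    import (
    	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    )

    // deleteCRD names the v1beta1 group-version explicitly; the unversioned
    // Apiextensions() accessor no longer exists on the Interface.
    func deleteCRD(client clientset.Interface, name string) error {
    	return client.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(name, nil)
    }
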
-func (c *Clientset) Apiextensions() apiextensionsv1beta1.ApiextensionsV1beta1Interface { - return c.apiextensionsV1beta1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go index e65fe63eae..b10fe4c3c7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go @@ -75,8 +75,3 @@ var _ clientset.Interface = &Clientset{} func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface { return &fakeapiextensionsv1beta1.FakeApiextensionsV1beta1{Fake: &c.Fake} } - -// Apiextensions retrieves the ApiextensionsV1beta1Client -func (c *Clientset) Apiextensions() apiextensionsv1beta1.ApiextensionsV1beta1Interface { - return &fakeapiextensionsv1beta1.FakeApiextensionsV1beta1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go index 7230725d4d..dcd5277e7a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go @@ -355,7 +355,7 @@ func isWatchCachePrimed(crd *apiextensionsv1beta1.CustomResourceDefinition, dyna // DeleteCustomResourceDefinition deletes a CRD and waits until it disappears from discovery. func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - if err := apiExtensionsClient.Apiextensions().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { + if err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { return err } for _, version := range servedVersions(crd) { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go index f00e0bc015..11f704f736 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go @@ -37,5 +37,7 @@ func ConvertToGVK(obj runtime.Object, gvk schema.GroupVersionKind, o admission.O if err != nil { return nil, err } + // Explicitly set the GVK + out.GetObjectKind().SetGroupVersionKind(gvk) return out, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go index 5bb70d1b4b..5482b7b5ab 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go @@ -62,6 +62,10 @@ func TestConvertToGVK(t *testing.T) { }, gvk: examplev1.SchemeGroupVersion.WithKind("Pod"), expectedObj: &examplev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "example.apiserver.k8s.io/v1", + Kind: "Pod", + }, ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Labels: map[string]string{ @@ -87,6 +91,10 @@ func TestConvertToGVK(t 
*testing.T) { }, gvk: example2v1.SchemeGroupVersion.WithKind("ReplicaSet"), expectedObj: &example2v1.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "example2.apiserver.k8s.io/v1", + Kind: "ReplicaSet", + }, ObjectMeta: metav1.ObjectMeta{ Name: "rs1", Labels: map[string]string{ diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 44490d1b4f..a2b3674955 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -101,7 +101,11 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *v1beta return &webhook.ErrCallingWebhook{WebhookName: h.Name, Reason: err} } response := &admissionv1beta1.AdmissionReview{} - if err := client.Post().Context(ctx).Body(&request).Do().Into(response); err != nil { + r := client.Post().Context(ctx).Body(&request) + if h.TimeoutSeconds != nil { + r = r.Timeout(time.Duration(*h.TimeoutSeconds) * time.Second) + } + if err := r.Do().Into(response); err != nil { return &webhook.ErrCallingWebhook{WebhookName: h.Name, Reason: err} } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go index d779be7b83..2a70e4e64e 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -115,7 +115,11 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1beta1.Webhook, return &webhook.ErrCallingWebhook{WebhookName: h.Name, Reason: err} } response := &admissionv1beta1.AdmissionReview{} - if err := client.Post().Context(ctx).Body(&request).Do().Into(response); err != nil { + r := client.Post().Context(ctx).Body(&request) + if h.TimeoutSeconds != nil { + r = r.Timeout(time.Duration(*h.TimeoutSeconds) * time.Second) + } + if err := r.Do().Into(response); err != nil { return &webhook.ErrCallingWebhook{WebhookName: h.Name, Reason: err} } diff --git a/staging/src/k8s.io/apiserver/pkg/audit/BUILD b/staging/src/k8s.io/apiserver/pkg/audit/BUILD index f71da4d424..f6923a2828 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/audit/BUILD @@ -19,6 +19,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/audit", importpath = "k8s.io/apiserver/pkg/audit", deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/audit/request.go b/staging/src/k8s.io/apiserver/pkg/audit/request.go index d4b12770ea..7ddf764ba6 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/request.go +++ b/staging/src/k8s.io/apiserver/pkg/audit/request.go @@ -20,13 +20,13 @@ import ( "bytes" "fmt" "net/http" + "reflect" "time" "github.com/pborman/uuid" "k8s.io/klog" - "reflect" - + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -117,8 +117,9 @@ func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, gvr schema.Gr if 
ae.ObjectRef == nil { ae.ObjectRef = &auditinternal.ObjectReference{} } - if acc, ok := obj.(metav1.ObjectMetaAccessor); ok { - meta := acc.GetObjectMeta() + + // meta.Accessor is more general than ObjectMetaAccessor, but if it fails, we can just skip setting these bits + if meta, err := meta.Accessor(obj); err == nil { if len(ae.ObjectRef.Namespace) == 0 { ae.ObjectRef.Namespace = meta.GetNamespace() } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD index 6abb2845a9..1c2892a40e 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD @@ -42,7 +42,6 @@ go_test( deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go index a890e93250..08c9e0eb59 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -235,6 +235,9 @@ var stripSet = fieldpath.NewSet( fieldpath.MakePathOrDie("metadata", "creationTimestamp"), fieldpath.MakePathOrDie("metadata", "selfLink"), fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), fieldpath.MakePathOrDie("metadata", "resourceVersion"), ) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go index c4edbc2ada..82aa6a84a3 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go @@ -19,11 +19,9 @@ package fieldmanager_test import ( "errors" "testing" - "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" @@ -71,14 +69,7 @@ func TestFieldManagerCreation(t *testing.T) { func TestApplyStripsFields(t *testing.T) { f := NewTestFieldManager(t) - obj := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "a", - Namespace: "a", - CreationTimestamp: metav1.Time{Time: time.Time{}}, - SelfLink: "a", - }, - } + obj := &corev1.Pod{} newObj, err := f.Apply(obj, []byte(`{ "apiVersion": "v1", @@ -89,6 +80,20 @@ func TestApplyStripsFields(t *testing.T) { "creationTimestamp": "2016-05-19T09:59:00Z", "selfLink": "b", "uid": "b", + "clusterName": "b", + "generation": 0, + "managedFields": [{ + "manager": "apply", + "operation": "Apply", + "apiVersion": "v1", + "fields": { + "f:metadata": { + "f:labels": { + "f:test-label": {} + } + } + } + }], "resourceVersion": "b" } }`), false) @@ -108,14 +113,7 @@ func TestApplyStripsFields(t *testing.T) { func TestApplyDoesNotStripLabels(t 
*testing.T) { f := NewTestFieldManager(t) - obj := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "a", - Namespace: "a", - CreationTimestamp: metav1.Time{Time: time.Time{}}, - SelfLink: "a", - }, - } + obj := &corev1.Pod{} newObj, err := f.Apply(obj, []byte(`{ "apiVersion": "v1", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD index 15ec1a3f62..64d62276e9 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD @@ -49,7 +49,6 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto/testing:go_default_library", "//vendor/sigs.k8s.io/structured-merge-diff/fieldpath:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter_test.go index 093ce64c94..c36d686545 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter_test.go @@ -23,7 +23,8 @@ import ( "strings" "testing" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" "k8s.io/kube-openapi/pkg/util/proto" diff --git a/staging/src/k8s.io/cli-runtime/artifacts/kustomization/configMap.yaml b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/configMap.yaml new file mode 100644 index 0000000000..e335ab8cc8 --- /dev/null +++ b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/configMap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: the-map +data: + altGreeting: "Good Morning!" 
+ enableRisky: "false" diff --git a/staging/src/k8s.io/cli-runtime/artifacts/kustomization/deployment.yaml b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/deployment.yaml new file mode 100644 index 0000000000..6e79409080 --- /dev/null +++ b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/deployment.yaml @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: the-deployment +spec: + replicas: 3 + template: + metadata: + labels: + deployment: hello + spec: + containers: + - name: the-container + image: monopole/hello:1 + command: ["/hello", + "--port=8080", + "--enableRiskyFeature=$(ENABLE_RISKY)"] + ports: + - containerPort: 8080 + env: + - name: ALT_GREETING + valueFrom: + configMapKeyRef: + name: the-map + key: altGreeting + - name: ENABLE_RISKY + valueFrom: + configMapKeyRef: + name: the-map + key: enableRisky diff --git a/staging/src/k8s.io/cli-runtime/artifacts/kustomization/kustomization.yaml b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/kustomization.yaml new file mode 100644 index 0000000000..eef3195711 --- /dev/null +++ b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/kustomization.yaml @@ -0,0 +1,5 @@ +nameprefix: test- +resources: +- deployment.yaml +- service.yaml +- configMap.yaml diff --git a/staging/src/k8s.io/cli-runtime/artifacts/kustomization/service.yaml b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/service.yaml new file mode 100644 index 0000000000..e238f70021 --- /dev/null +++ b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/service.yaml @@ -0,0 +1,12 @@ +kind: Service +apiVersion: v1 +metadata: + name: the-service +spec: + selector: + deployment: hello + type: LoadBalancer + ports: + - protocol: TCP + port: 8666 + targetPort: 8080 diff --git a/staging/src/k8s.io/cli-runtime/artifacts/kustomization/should-not-create.yaml b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/should-not-create.yaml new file mode 100644 index 0000000000..a927e6b98b --- /dev/null +++ b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/should-not-create.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: should-not-create-map +data: + altGreeting: "Good Morning!" 
+ enableRisky: "false" diff --git a/staging/src/k8s.io/cli-runtime/artifacts/kustomization/should-not-load.yaml b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/should-not-load.yaml new file mode 100644 index 0000000000..f9be4c33a1 --- /dev/null +++ b/staging/src/k8s.io/cli-runtime/artifacts/kustomization/should-not-load.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +nameprefix: test- +resources: +- deployment.yaml +- service.yaml +- configMap.yaml diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go index 348a9c6368..0d43c7808a 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go @@ -32,6 +32,7 @@ type FileNameFlags struct { Usage string Filenames *[]string + Kustomize *string Recursive *bool } @@ -48,6 +49,9 @@ func (o *FileNameFlags) ToOptions() resource.FilenameOptions { if o.Filenames != nil { options.Filenames = *o.Filenames } + if o.Kustomize != nil { + options.Kustomize = *o.Kustomize + } return options } @@ -68,4 +72,8 @@ func (o *FileNameFlags) AddFlags(flags *pflag.FlagSet) { } flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) } + if o.Kustomize != nil { + flags.StringVarP(o.Kustomize, "kustomize", "k", *o.Kustomize, + "Process a kustomization directory. This flag can't be used together with -f or -R.") + } } diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD index 22b34de008..3221ba1eb7 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD @@ -35,12 +35,14 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/cli-runtime/pkg/kustomize:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/restmapper:go_default_library", "//vendor/golang.org/x/text/encoding/unicode:go_default_library", "//vendor/golang.org/x/text/transform:go_default_library", + "//vendor/sigs.k8s.io/kustomize/pkg/fs:go_default_library", ], ) diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go index f3f8173a9c..08528fa298 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go @@ -130,9 +130,28 @@ func IsUsageError(err error) bool { type FilenameOptions struct { Filenames []string + Kustomize string Recursive bool } +func (o *FilenameOptions) validate() []error { + var errs []error + if len(o.Filenames) > 0 && len(o.Kustomize) > 0 { + errs = append(errs, fmt.Errorf("only one of -f or -k can be specified")) + } + if len(o.Kustomize) > 0 && o.Recursive { + errs = append(errs, fmt.Errorf("the -k flag can't be used with -f or -R")) + } + return errs +} + +func (o *FilenameOptions) RequireFilenameOrKustomize() error { + if len(o.Filenames) == 0 && 
len(o.Kustomize) == 0 { + return fmt.Errorf("must specify one of -f and -k") + } + return nil +} + type resourceTuple struct { Resource string Name string @@ -195,6 +214,10 @@ func (b *Builder) AddError(err error) *Builder { // If ContinueOnError() is set prior to this method, objects on the path that are not // recognized will be ignored (but logged at V(2)). func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *FilenameOptions) *Builder { + if errs := filenameOptions.validate(); len(errs) > 0 { + b.errs = append(b.errs, errs...) + return b + } recursive := filenameOptions.Recursive paths := filenameOptions.Filenames for _, s := range paths { @@ -215,6 +238,10 @@ func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *Filename b.Path(recursive, s) } } + if filenameOptions.Kustomize != "" { + b.paths = append(b.paths, &KustomizeVisitor{filenameOptions.Kustomize, + NewStreamVisitor(nil, b.mapper, filenameOptions.Kustomize, b.schema)}) + } if enforceNamespace { b.RequireNamespace() diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder_test.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder_test.go index 8aaba3ca22..d2e0d6ffec 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder_test.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder_test.go @@ -374,6 +374,62 @@ func writeTestFile(t *testing.T, path string, contents string) { } } +func TestFilenameOptionsValidate(t *testing.T) { + testcases := []struct { + filenames []string + kustomize string + recursive bool + errExp bool + msgExp string + }{ + { + filenames: []string{"file"}, + kustomize: "dir", + errExp: true, + msgExp: "only one of -f or -k can be specified", + }, + { + kustomize: "dir", + recursive: true, + errExp: true, + msgExp: "the -k flag can't be used with -f or -R", + }, + { + filenames: []string{"file"}, + errExp: false, + }, + { + filenames: []string{"dir"}, + recursive: true, + errExp: false, + }, + { + kustomize: "dir", + errExp: false, + }, + } + for _, testcase := range testcases { + o := &FilenameOptions{ + Kustomize: testcase.kustomize, + Filenames: testcase.filenames, + Recursive: testcase.recursive, + } + errs := o.validate() + if testcase.errExp { + if len(errs) == 0 { + t.Fatalf("expected error not happened") + } + if errs[0].Error() != testcase.msgExp { + t.Fatalf("expected %s, but got %#v", testcase.msgExp, errs[0]) + } + } else { + if len(errs) > 0 { + t.Fatalf("Unexpected error %#v", errs) + } + } + } +} + func TestPathBuilderWithMultiple(t *testing.T) { // create test dirs tmpDir, err := utiltesting.MkTmpdir("recursive_test_multiple") @@ -513,6 +569,160 @@ func TestDirectoryBuilder(t *testing.T) { } } +func setupKustomizeDirectory() (string, error) { + path, err := ioutil.TempDir("/tmp", "") + if err != nil { + return "", err + } + + contents := map[string]string{ + "configmap.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: the-map +data: + altGreeting: "Good Morning!" 
+ enableRisky: "false" +`, + "deployment.yaml": ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: the-deployment +spec: + replicas: 3 + template: + metadata: + labels: + deployment: hello + spec: + containers: + - name: the-container + image: monopole/hello:1 + command: ["/hello", + "--port=8080", + "--enableRiskyFeature=$(ENABLE_RISKY)"] + ports: + - containerPort: 8080 + env: + - name: ALT_GREETING + valueFrom: + configMapKeyRef: + name: the-map + key: altGreeting + - name: ENABLE_RISKY + valueFrom: + configMapKeyRef: + name: the-map + key: enableRisky +`, + "service.yaml": ` +kind: Service +apiVersion: v1 +metadata: + name: the-service +spec: + selector: + deployment: hello + type: LoadBalancer + ports: + - protocol: TCP + port: 8666 + targetPort: 8080 +`, + "kustomization.yaml": ` +nameprefix: test- +resources: +- deployment.yaml +- service.yaml +- configmap.yaml +`, + } + + for filename, content := range contents { + err = ioutil.WriteFile(filepath.Join(path, filename), []byte(content), 0660) + if err != nil { + return "", err + } + } + return path, nil +} + +func TestKustomizeDirectoryBuilder(t *testing.T) { + dir, err := setupKustomizeDirectory() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + defer os.RemoveAll(dir) + + tests := []struct { + directory string + expectErr bool + errMsg string + number int + expectedNames []string + }{ + { + directory: "../../../artifacts/guestbook", + expectErr: true, + errMsg: "No kustomization file found", + }, + { + directory: dir, + expectErr: false, + expectedNames: []string{"test-the-map", "test-the-deployment", "test-the-service"}, + }, + { + directory: filepath.Join(dir, "kustomization.yaml"), + expectErr: true, + errMsg: "must be a directory to be a root", + }, + { + directory: "../../../artifacts/kustomization/should-not-load.yaml", + expectErr: true, + errMsg: "must be a directory to be a root", + }, + } + for _, tt := range tests { + b := newDefaultBuilder(). + FilenameParam(false, &FilenameOptions{Kustomize: tt.directory}). 
+ NamespaceParam("test").DefaultNamespace() + test := &testVisitor{} + err := b.Do().Visit(test.Handle) + if tt.expectErr { + if err == nil { + t.Fatalf("expected error unhappened") + } + if !strings.Contains(err.Error(), tt.errMsg) { + t.Fatalf("expected %s but got %s", tt.errMsg, err.Error()) + } + } else { + if err != nil || len(test.Infos) < tt.number { + t.Fatalf("unexpected response: %v %#v", err, test.Infos) + } + contained := func(name string) bool { + for _, info := range test.Infos { + if info.Name == name && info.Namespace == "test" && info.Object != nil { + return true + } + } + return false + } + + allFound := true + for _, name := range tt.expectedNames { + if !contained(name) { + allFound = false + } + } + if !allFound { + t.Errorf("unexpected responses: %#v", test.Infos) + } + } + } +} + func TestNamespaceOverride(t *testing.T) { s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.WriteHeader(http.StatusOK) diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/visitor.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/visitor.go index b4e3b359a1..a679ce90af 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/visitor.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource/visitor.go @@ -39,6 +39,8 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/kustomize" + "sigs.k8s.io/kustomize/pkg/fs" ) const ( @@ -520,6 +522,24 @@ func (v *FileVisitor) Visit(fn VisitorFunc) error { return v.StreamVisitor.Visit(fn) } +// KustomizeVisitor is wrapper around a StreamVisitor, to handle Kustomization directories +type KustomizeVisitor struct { + Path string + *StreamVisitor +} + +// Visit in a KustomizeVisitor gets the output of Kustomize build and save it in the Streamvisitor +func (v *KustomizeVisitor) Visit(fn VisitorFunc) error { + fSys := fs.MakeRealFS() + var out bytes.Buffer + err := kustomize.RunKustomizeBuild(&out, fSys, v.Path) + if err != nil { + return err + } + v.StreamVisitor.Reader = bytes.NewReader(out.Bytes()) + return v.StreamVisitor.Visit(fn) +} + // StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be // visited once. // TODO: depends on objects being in JSON format before being passed to decode - need to implement diff --git a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go index d77d7ecbc4..6f83c3e8d5 100644 --- a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go +++ b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go @@ -61,7 +61,7 @@ func TestFakeClient(t *testing.T) { // Inject an event into the fake client. 
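TestKustomizeDirectoryBuilder above exercises the new KustomizeVisitor end to end. Stripped of the Builder plumbing, the visitor boils down to one step, sketched here with the same kustomize helpers the visitor itself uses; "path" is assumed to hold a kustomization.yaml like the fixtures above:

    package sketch

    import (
    	"bytes"

    	"k8s.io/cli-runtime/pkg/kustomize"
    	"sigs.k8s.io/kustomize/pkg/fs"
    )

    // renderKustomization runs a kustomize build over the real filesystem and
    // returns the rendered YAML stream that KustomizeVisitor feeds into its
    // embedded StreamVisitor.
    func renderKustomization(path string) ([]byte, error) {
    	fSys := fs.MakeRealFS()
    	var out bytes.Buffer
    	if err := kustomize.RunKustomizeBuild(&out, fSys, path); err != nil {
    		return nil, err
    	}
    	return out.Bytes(), nil
    }
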
p := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod"}} - _, err := client.Core().Pods("test-ns").Create(p) + _, err := client.CoreV1().Pods("test-ns").Create(p) if err != nil { t.Errorf("error injecting pod add: %v", err) } diff --git a/staging/src/k8s.io/client-go/kubernetes/clientset.go b/staging/src/k8s.io/client-go/kubernetes/clientset.go index ff211d5b32..dbaeffb1c9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/clientset.go +++ b/staging/src/k8s.io/client-go/kubernetes/clientset.go @@ -61,74 +61,38 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1() appsv1.AppsV1Interface - // Deprecated: please explicitly pick a version if possible. - Apps() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface - // Deprecated: please explicitly pick a version if possible. - Authentication() authenticationv1.AuthenticationV1Interface AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface AuthorizationV1() authorizationv1.AuthorizationV1Interface - // Deprecated: please explicitly pick a version if possible. - Authorization() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface AutoscalingV1() autoscalingv1.AutoscalingV1Interface - // Deprecated: please explicitly pick a version if possible. - Autoscaling() autoscalingv1.AutoscalingV1Interface AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface BatchV1() batchv1.BatchV1Interface - // Deprecated: please explicitly pick a version if possible. - Batch() batchv1.BatchV1Interface BatchV1beta1() batchv1beta1.BatchV1beta1Interface BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Certificates() certificatesv1beta1.CertificatesV1beta1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface - // Deprecated: please explicitly pick a version if possible. - Coordination() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface - // Deprecated: please explicitly pick a version if possible. - Core() corev1.CoreV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Extensions() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface - // Deprecated: please explicitly pick a version if possible. 
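The fake-client example test updated above follows the same accessor migration as the real clientset. A compact sketch of the pattern, using only names from the diff plus the standard fake constructor:

    package sketch

    import (
    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes/fake"
    )

    // injectPod seeds a fake clientset exactly as the example test does; the
    // fake satisfies the same Interface, so CoreV1() is used identically.
    func injectPod() (*v1.Pod, error) {
    	client := fake.NewSimpleClientset()
    	p := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod"}}
    	return client.CoreV1().Pods("test-ns").Create(p)
    }
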
- Networking() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Policy() policyv1beta1.PolicyV1beta1Interface RbacV1() rbacv1.RbacV1Interface - // Deprecated: please explicitly pick a version if possible. - Rbac() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface - // Deprecated: please explicitly pick a version if possible. - Scheduling() schedulingv1.SchedulingV1Interface SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Settings() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface - // Deprecated: please explicitly pick a version if possible. - Storage() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } @@ -177,23 +141,11 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. return c.admissionregistrationV1beta1 } -// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. -// Please explicitly pick a version. -func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { - return c.admissionregistrationV1beta1 -} - // AppsV1 retrieves the AppsV1Client func (c *Clientset) AppsV1() appsv1.AppsV1Interface { return c.appsV1 } -// Deprecated: Apps retrieves the default version of AppsClient. -// Please explicitly pick a version. -func (c *Clientset) Apps() appsv1.AppsV1Interface { - return c.appsV1 -} - // AppsV1beta1 retrieves the AppsV1beta1Client func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { return c.appsV1beta1 @@ -209,23 +161,11 @@ func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.Auditr return c.auditregistrationV1alpha1 } -// Deprecated: Auditregistration retrieves the default version of AuditregistrationClient. -// Please explicitly pick a version. -func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { - return c.auditregistrationV1alpha1 -} - // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return c.authenticationV1 } -// Deprecated: Authentication retrieves the default version of AuthenticationClient. -// Please explicitly pick a version. -func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { - return c.authenticationV1 -} - // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return c.authenticationV1beta1 @@ -236,12 +176,6 @@ func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { return c.authorizationV1 } -// Deprecated: Authorization retrieves the default version of AuthorizationClient. -// Please explicitly pick a version. 
-func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { - return c.authorizationV1 -} - // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { return c.authorizationV1beta1 @@ -252,12 +186,6 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return c.autoscalingV1 } -// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. -// Please explicitly pick a version. -func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { - return c.autoscalingV1 -} - // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return c.autoscalingV2beta1 @@ -273,12 +201,6 @@ func (c *Clientset) BatchV1() batchv1.BatchV1Interface { return c.batchV1 } -// Deprecated: Batch retrieves the default version of BatchClient. -// Please explicitly pick a version. -func (c *Clientset) Batch() batchv1.BatchV1Interface { - return c.batchV1 -} - // BatchV1beta1 retrieves the BatchV1beta1Client func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { return c.batchV1beta1 @@ -294,12 +216,6 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } -// Deprecated: Certificates retrieves the default version of CertificatesClient. -// Please explicitly pick a version. -func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { - return c.certificatesV1beta1 -} - // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -310,56 +226,26 @@ func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { return c.coordinationV1 } -// Deprecated: Coordination retrieves the default version of CoordinationClient. -// Please explicitly pick a version. -func (c *Clientset) Coordination() coordinationv1.CoordinationV1Interface { - return c.coordinationV1 -} - // CoreV1 retrieves the CoreV1Client func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } -// Deprecated: Core retrieves the default version of CoreClient. -// Please explicitly pick a version. -func (c *Clientset) Core() corev1.CoreV1Interface { - return c.coreV1 -} - // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 } -// Deprecated: Events retrieves the default version of EventsClient. -// Please explicitly pick a version. -func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { - return c.eventsV1beta1 -} - // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return c.extensionsV1beta1 } -// Deprecated: Extensions retrieves the default version of ExtensionsClient. -// Please explicitly pick a version. -func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { - return c.extensionsV1beta1 -} - // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } -// Deprecated: Networking retrieves the default version of NetworkingClient. -// Please explicitly pick a version. 
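Because every unversioned accessor is dropped from the kubernetes Interface at once, downstream code migrates group by group. A few representative rewrites, sketched with era-appropriate list calls; listJobs is a hypothetical helper showing the shape:

    package sketch

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // Typical one-line rewrites after this change:
    //   client.Batch().Jobs(ns)           -> client.BatchV1().Jobs(ns)
    //   client.Rbac().ClusterRoles()      -> client.RbacV1().ClusterRoles()
    //   client.Storage().StorageClasses() -> client.StorageV1().StorageClasses()
    func listJobs(client kubernetes.Interface, ns string) error {
    	_, err := client.BatchV1().Jobs(ns).List(metav1.ListOptions{})
    	return err
    }
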
-func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { - return c.networkingV1 -} - // NetworkingV1beta1 retrieves the NetworkingV1beta1Client func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { return c.networkingV1beta1 @@ -370,23 +256,11 @@ func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return c.policyV1beta1 } -// Deprecated: Policy retrieves the default version of PolicyClient. -// Please explicitly pick a version. -func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { - return c.policyV1beta1 -} - // RbacV1 retrieves the RbacV1Client func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { return c.rbacV1 } -// Deprecated: Rbac retrieves the default version of RbacClient. -// Please explicitly pick a version. -func (c *Clientset) Rbac() rbacv1.RbacV1Interface { - return c.rbacV1 -} - // RbacV1beta1 retrieves the RbacV1beta1Client func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return c.rbacV1beta1 @@ -412,23 +286,11 @@ func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { return c.schedulingV1 } -// Deprecated: Scheduling retrieves the default version of SchedulingClient. -// Please explicitly pick a version. -func (c *Clientset) Scheduling() schedulingv1.SchedulingV1Interface { - return c.schedulingV1 -} - // SettingsV1alpha1 retrieves the SettingsV1alpha1Client func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { return c.settingsV1alpha1 } -// Deprecated: Settings retrieves the default version of SettingsClient. -// Please explicitly pick a version. -func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -439,12 +301,6 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { return c.storageV1 } -// Deprecated: Storage retrieves the default version of StorageClient. -// Please explicitly pick a version. -func (c *Clientset) Storage() storagev1.StorageV1Interface { - return c.storageV1 -} - // StorageV1alpha1 retrieves the StorageV1alpha1Client func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return c.storageV1alpha1 diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 0b7c06112a..9a13a650d6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -142,21 +142,11 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. 
return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} } -// Admissionregistration retrieves the AdmissionregistrationV1beta1Client -func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { - return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} -} - // AppsV1 retrieves the AppsV1Client func (c *Clientset) AppsV1() appsv1.AppsV1Interface { return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} } -// Apps retrieves the AppsV1Client -func (c *Clientset) Apps() appsv1.AppsV1Interface { - return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} -} - // AppsV1beta1 retrieves the AppsV1beta1Client func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake} @@ -172,21 +162,11 @@ func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.Auditr return &fakeauditregistrationv1alpha1.FakeAuditregistrationV1alpha1{Fake: &c.Fake} } -// Auditregistration retrieves the AuditregistrationV1alpha1Client -func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { - return &fakeauditregistrationv1alpha1.FakeAuditregistrationV1alpha1{Fake: &c.Fake} -} - // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} } -// Authentication retrieves the AuthenticationV1Client -func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { - return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} -} - // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return &fakeauthenticationv1beta1.FakeAuthenticationV1beta1{Fake: &c.Fake} @@ -197,11 +177,6 @@ func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} } -// Authorization retrieves the AuthorizationV1Client -func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { - return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} -} - // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { return &fakeauthorizationv1beta1.FakeAuthorizationV1beta1{Fake: &c.Fake} @@ -212,11 +187,6 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} } -// Autoscaling retrieves the AutoscalingV1Client -func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { - return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} -} - // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return &fakeautoscalingv2beta1.FakeAutoscalingV2beta1{Fake: &c.Fake} @@ -232,11 +202,6 @@ func (c *Clientset) BatchV1() batchv1.BatchV1Interface { return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} } -// Batch retrieves the BatchV1Client -func (c *Clientset) Batch() batchv1.BatchV1Interface { - return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} -} - // BatchV1beta1 retrieves the BatchV1beta1Client func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { return &fakebatchv1beta1.FakeBatchV1beta1{Fake: 
&c.Fake} @@ -252,11 +217,6 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} } -// Certificates retrieves the CertificatesV1beta1Client -func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { - return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} -} - // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return &fakecoordinationv1beta1.FakeCoordinationV1beta1{Fake: &c.Fake} @@ -267,51 +227,26 @@ func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { return &fakecoordinationv1.FakeCoordinationV1{Fake: &c.Fake} } -// Coordination retrieves the CoordinationV1Client -func (c *Clientset) Coordination() coordinationv1.CoordinationV1Interface { - return &fakecoordinationv1.FakeCoordinationV1{Fake: &c.Fake} -} - // CoreV1 retrieves the CoreV1Client func (c *Clientset) CoreV1() corev1.CoreV1Interface { return &fakecorev1.FakeCoreV1{Fake: &c.Fake} } -// Core retrieves the CoreV1Client -func (c *Clientset) Core() corev1.CoreV1Interface { - return &fakecorev1.FakeCoreV1{Fake: &c.Fake} -} - // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake} } -// Events retrieves the EventsV1beta1Client -func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { - return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake} -} - // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} } -// Extensions retrieves the ExtensionsV1beta1Client -func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { - return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} -} - // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} } -// Networking retrieves the NetworkingV1Client -func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { - return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} -} - // NetworkingV1beta1 retrieves the NetworkingV1beta1Client func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake} @@ -322,21 +257,11 @@ func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} } -// Policy retrieves the PolicyV1beta1Client -func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { - return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} -} - // RbacV1 retrieves the RbacV1Client func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { return &fakerbacv1.FakeRbacV1{Fake: &c.Fake} } -// Rbac retrieves the RbacV1Client -func (c *Clientset) Rbac() rbacv1.RbacV1Interface { - return &fakerbacv1.FakeRbacV1{Fake: &c.Fake} -} - // RbacV1beta1 retrieves the RbacV1beta1Client func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} @@ -362,21 +287,11 @@ func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { return 
&fakeschedulingv1.FakeSchedulingV1{Fake: &c.Fake} } -// Scheduling retrieves the SchedulingV1Client -func (c *Clientset) Scheduling() schedulingv1.SchedulingV1Interface { - return &fakeschedulingv1.FakeSchedulingV1{Fake: &c.Fake} -} - // SettingsV1alpha1 retrieves the SettingsV1alpha1Client func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} } -// Settings retrieves the SettingsV1alpha1Client -func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { - return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return &fakestoragev1beta1.FakeStorageV1beta1{Fake: &c.Fake} @@ -387,11 +302,6 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { return &fakestoragev1.FakeStorageV1{Fake: &c.Fake} } -// Storage retrieves the StorageV1Client -func (c *Clientset) Storage() storagev1.StorageV1Interface { - return &fakestoragev1.FakeStorageV1{Fake: &c.Fake} -} - // StorageV1alpha1 retrieves the StorageV1alpha1Client func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return &fakestoragev1alpha1.FakeStorageV1alpha1{Fake: &c.Fake} diff --git a/staging/src/k8s.io/client-go/tools/cache/listwatch.go b/staging/src/k8s.io/client-go/tools/cache/listwatch.go index f86791650e..8227b73b69 100644 --- a/staging/src/k8s.io/client-go/tools/cache/listwatch.go +++ b/staging/src/k8s.io/client-go/tools/cache/listwatch.go @@ -27,15 +27,25 @@ import ( "k8s.io/client-go/tools/pager" ) -// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. -type ListerWatcher interface { +// Lister is any object that knows how to perform an initial list. +type Lister interface { // List should return a list type object; the Items field will be extracted, and the // ResourceVersion field will be used to start the watch in the right place. List(options metav1.ListOptions) (runtime.Object, error) +} + +// Watcher is any object that knows how to start a watch on a resource. +type Watcher interface { // Watch should begin a watch at the specified version. Watch(options metav1.ListOptions) (watch.Interface, error) } +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. 
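Splitting ListerWatcher into single-method Lister and Watcher interfaces (recomposed into ListerWatcher just below) lets call sites demand only the capability they actually use. A minimal sketch of a watch-only consumer under that assumption; startWatch and newSecretWatcher are illustrative names, not client-go API:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// startWatch needs only the watch half of ListerWatcher, so it accepts the
// narrower cache.Watcher interface introduced by this change.
func startWatch(w cache.Watcher, resourceVersion string) (watch.Interface, error) {
	return w.Watch(metav1.ListOptions{ResourceVersion: resourceVersion})
}

// newSecretWatcher adapts a typed clientset; cache.ListWatch satisfies
// Lister, Watcher, and the composed ListerWatcher alike.
func newSecretWatcher(client kubernetes.Interface, namespace string) cache.Watcher {
	return &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Secrets(namespace).Watch(options)
		},
	}
}
```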
+type ListerWatcher interface { + Lister + Watcher +} + // ListFunc knows how to list resources type ListFunc func(options metav1.ListOptions) (runtime.Object, error) diff --git a/staging/src/k8s.io/client-go/tools/portforward/portforward.go b/staging/src/k8s.io/client-go/tools/portforward/portforward.go index 357680ede0..a50a9973ee 100644 --- a/staging/src/k8s.io/client-go/tools/portforward/portforward.go +++ b/staging/src/k8s.io/client-go/tools/portforward/portforward.go @@ -205,8 +205,9 @@ func (pf *PortForwarder) forward() error { var err error listenSuccess := false - for _, port := range pf.ports { - err = pf.listenOnPort(&port) + for i := range pf.ports { + port := &pf.ports[i] + err = pf.listenOnPort(port) switch { case err == nil: listenSuccess = true diff --git a/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go b/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go index dd8d4fd5e5..b39fa504ee 100644 --- a/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go +++ b/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go @@ -18,11 +18,13 @@ package portforward import ( "net" + "net/http" "os" "reflect" "sort" "strings" "testing" + "time" "k8s.io/apimachinery/pkg/util/httpstream" ) @@ -39,6 +41,37 @@ func (d *fakeDialer) Dial(protocols ...string) (httpstream.Connection, string, e return d.conn, d.negotiatedProtocol, d.err } +type fakeConnection struct { + closed bool + closeChan chan bool +} + +func newFakeConnection() httpstream.Connection { + return &fakeConnection{ + closeChan: make(chan bool), + } +} + +func (c *fakeConnection) CreateStream(headers http.Header) (httpstream.Stream, error) { + return nil, nil +} + +func (c *fakeConnection) Close() error { + if !c.closed { + c.closed = true + close(c.closeChan) + } + return nil +} + +func (c *fakeConnection) CloseChan() <-chan bool { + return c.closeChan +} + +func (c *fakeConnection) SetIdleTimeout(timeout time.Duration) { + // no-op +} + func TestParsePortsAndNew(t *testing.T) { tests := []struct { input []string @@ -310,3 +343,46 @@ func TestGetListener(t *testing.T) { } } + +func TestGetPortsReturnsDynamicallyAssignedLocalPort(t *testing.T) { + dialer := &fakeDialer{ + conn: newFakeConnection(), + } + + stopChan := make(chan struct{}) + readyChan := make(chan struct{}) + errChan := make(chan error) + + defer func() { + close(stopChan) + + forwardErr := <-errChan + if forwardErr != nil { + t.Fatalf("ForwardPorts returned error: %s", forwardErr) + } + }() + + pf, err := New(dialer, []string{":5000"}, stopChan, readyChan, os.Stdout, os.Stderr) + + if err != nil { + t.Fatalf("error while calling New: %s", err) + } + + go func() { + errChan <- pf.ForwardPorts() + close(errChan) + }() + + <-pf.Ready + + ports, err := pf.GetPorts() + + if len(ports) != 1 { + t.Fatalf("expected 1 port, got %d", len(ports)) + } + + port := ports[0] + if port.Local == 0 { + t.Fatalf("local port is 0, expected != 0") + } +} diff --git a/staging/src/k8s.io/client-go/tools/watch/BUILD b/staging/src/k8s.io/client-go/tools/watch/BUILD index 9f7a97cd4a..f31c836986 100644 --- a/staging/src/k8s.io/client-go/tools/watch/BUILD +++ b/staging/src/k8s.io/client-go/tools/watch/BUILD @@ -4,18 +4,22 @@ go_library( name = "go_default_library", srcs = [ "informerwatcher.go", + "retrywatcher.go", "until.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/tools/watch", importpath = "k8s.io/client-go/tools/watch", visibility = ["//visibility:public"], deps = [ + 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -24,11 +28,13 @@ go_test( name = "go_default_test", srcs = [ "informerwatcher_test.go", + "retrywatcher_test.go", "until_test.go", ], embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -39,6 +45,7 @@ go_test( "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go b/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go index b5a09f0c32..051898654f 100644 --- a/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go +++ b/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go @@ -182,10 +182,10 @@ func TestNewInformerWatcher(t *testing.T) { lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return fake.Core().Secrets("").List(options) + return fake.CoreV1().Secrets("").List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return fake.Core().Secrets("").Watch(options) + return fake.CoreV1().Secrets("").Watch(options) }, } _, _, w, done := NewIndexerInformerWatcher(lw, &corev1.Secret{}) diff --git a/staging/src/k8s.io/client-go/tools/watch/retrywatcher.go b/staging/src/k8s.io/client-go/tools/watch/retrywatcher.go new file mode 100644 index 0000000000..62c14b0784 --- /dev/null +++ b/staging/src/k8s.io/client-go/tools/watch/retrywatcher.go @@ -0,0 +1,283 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/davecgh/go-spew/spew" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" +) + +// resourceVersionGetter is an interface used to get the resource version from events. +// We can't reuse an interface from meta because that would create a cyclic dependency, and we need just this one method. +type resourceVersionGetter interface { + GetResourceVersion() string +} + +// RetryWatcher will make sure that in case the underlying watcher is closed (e.g. due to API timeout or etcd timeout) +// it will get restarted from the last point without the consumer even knowing about it. +// RetryWatcher does that by inspecting events and keeping track of resourceVersion. +// Especially useful when using watch.UntilWithoutRetry, where premature termination causes issues and flakes. +// Please note that this is not resilient to etcd cache not having the resource version anymore - you would need to +// use Informers for that. +type RetryWatcher struct { + lastResourceVersion string + watcherClient cache.Watcher + resultChan chan watch.Event + stopChan chan struct{} + doneChan chan struct{} + minRestartDelay time.Duration +} + +// NewRetryWatcher creates a new RetryWatcher. +// It will make sure that watches get restarted in case of recoverable errors. +// The initialResourceVersion will be given to the watch method when first called. +func NewRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher) (*RetryWatcher, error) { + return newRetryWatcher(initialResourceVersion, watcherClient, 1*time.Second) +} + +func newRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher, minRestartDelay time.Duration) (*RetryWatcher, error) { + switch initialResourceVersion { + case "", "0": + // TODO: revisit this if we ever get WATCH v2 where it means start "now" + // without doing the synthetic list of objects at the beginning (see #74022) + return nil, fmt.Errorf("initial RV %q is not supported due to issues with underlying WATCH", initialResourceVersion) + default: + break + } + + rw := &RetryWatcher{ + lastResourceVersion: initialResourceVersion, + watcherClient: watcherClient, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + resultChan: make(chan watch.Event, 0), + minRestartDelay: minRestartDelay, + } + + go rw.receive() + return rw, nil +} + +func (rw *RetryWatcher) send(event watch.Event) bool { + // Writing to an unbuffered channel is a blocking operation + // and we need to check if stop wasn't requested while doing so. + select { + case rw.resultChan <- event: + return true + case <-rw.stopChan: + return false + } +} + +// doReceive returns true when it is done, false otherwise. +// If it is not done the second return value holds the time to wait before calling it again. +func (rw *RetryWatcher) doReceive() (bool, time.Duration) { + watcher, err := rw.watcherClient.Watch(metav1.ListOptions{ + ResourceVersion: rw.lastResourceVersion, + }) + // We are very unlikely to hit EOF here since we are just establishing the call, + // but it may happen that the apiserver is just shutting down (e.g.
being restarted). + // This is consistent with how it is handled for informers. + switch err { + case nil: + break + + case io.EOF: + // watch closed normally + return false, 0 + + case io.ErrUnexpectedEOF: + klog.V(1).Infof("Watch closed with unexpected EOF: %v", err) + return false, 0 + + default: + msg := "Watch failed: %v" + if net.IsProbableEOF(err) { + klog.V(5).Infof(msg, err) + // Retry + return false, 0 + } + + klog.Errorf(msg, err) + // Retry + return false, 0 + } + + if watcher == nil { + klog.Error("Watch returned nil watcher") + // Retry + return false, 0 + } + + ch := watcher.ResultChan() + defer watcher.Stop() + + for { + select { + case <-rw.stopChan: + klog.V(4).Info("Stopping RetryWatcher.") + return true, 0 + case event, ok := <-ch: + if !ok { + klog.V(4).Infof("Failed to get event! Re-creating the watcher. Last RV: %s", rw.lastResourceVersion) + return false, 0 + } + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified, watch.Deleted: + metaObject, ok := event.Object.(resourceVersionGetter) + if !ok { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(errors.New("retryWatcher: doesn't support resourceVersion")).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + resourceVersion := metaObject.GetResourceVersion() + if resourceVersion == "" { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher: object %#v doesn't support resourceVersion", event.Object)).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + // All is fine; send the event and update lastResourceVersion + ok = rw.send(event) + if !ok { + return true, 0 + } + rw.lastResourceVersion = resourceVersion + + continue + + case watch.Error: + status, ok := event.Object.(*metav1.Status) + if !ok { + klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + // Retry unknown errors + return false, 0 + } + + statusDelay := time.Duration(0) + if status.Details != nil { + statusDelay = time.Duration(status.Details.RetryAfterSeconds) * time.Second + } + + switch status.Code { + case http.StatusGone: + // Never retry RV too old errors + _ = rw.send(event) + return true, 0 + + case http.StatusGatewayTimeout, http.StatusInternalServerError: + // Retry + return false, statusDelay + + default: + // We retry by default. RetryWatcher is meant to proceed unless it is certain + // that it can't. If we are not certain, we proceed with retry and leave it + // up to the user to time out if needed. + + // Log here so we have a record of hitting the unexpected error + // and we can whitelist some error codes if we missed any that are expected. + klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object)) + + // Retry + return false, statusDelay + } + + default: + klog.Errorf("Failed to recognize Event type %q", event.Type) + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher failed to recognize Event type %q", event.Type)).ErrStatus, + }) + // We are unable to restart the watch and have to stop the loop or this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data!
+ return true, 0 + } + } + } +} + +// receive reads the result from a watcher, restarting it if necessary. +func (rw *RetryWatcher) receive() { + defer close(rw.doneChan) + defer close(rw.resultChan) + + klog.V(4).Info("Starting RetryWatcher.") + defer klog.V(4).Info("Stopping RetryWatcher.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-rw.stopChan: + cancel() + return + case <-ctx.Done(): + return + } + }() + + // We use non-sliding until so we don't introduce delays on the happy path when the WATCH call + // times out or gets closed and we need to re-establish it, while also avoiding hot loops. + wait.NonSlidingUntilWithContext(ctx, func(ctx context.Context) { + done, retryAfter := rw.doReceive() + if done { + cancel() + return + } + + time.Sleep(retryAfter) + + klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion) + }, rw.minRestartDelay) +} + +// ResultChan implements Interface. +func (rw *RetryWatcher) ResultChan() <-chan watch.Event { + return rw.resultChan +} + +// Stop implements Interface. +func (rw *RetryWatcher) Stop() { + close(rw.stopChan) +} + +// Done allows the caller to be notified when the RetryWatcher stops. +func (rw *RetryWatcher) Done() <-chan struct{} { + return rw.doneChan +}
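The diff above only shows RetryWatcher's internals; a minimal usage sketch follows. It assumes the initial resource version comes from a prior LIST call (RetryWatcher rejects "" and "0"), and watchConfigMaps is an illustrative helper, not part of client-go:

```go
package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// watchConfigMaps consumes events through a RetryWatcher so dropped watch
// connections are re-established transparently at the last seen RV.
func watchConfigMaps(client kubernetes.Interface, namespace, initialRV string) error {
	lw := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().ConfigMaps(namespace).Watch(options)
		},
	}

	rw, err := watchtools.NewRetryWatcher(initialRV, lw)
	if err != nil {
		return err
	}
	defer rw.Stop()

	// ResultChan is closed on Stop or on a fatal error such as
	// "resource version too old", which RetryWatcher never retries.
	for event := range rw.ResultChan() {
		fmt.Printf("%s %T\n", event.Type, event.Object)
	}
	<-rw.Done()
	return nil
}
```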
diff --git a/staging/src/k8s.io/client-go/tools/watch/retrywatcher_test.go b/staging/src/k8s.io/client-go/tools/watch/retrywatcher_test.go new file mode 100644 index 0000000000..cd57e51c52 --- /dev/null +++ b/staging/src/k8s.io/client-go/tools/watch/retrywatcher_test.go @@ -0,0 +1,593 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "errors" + "flag" + "fmt" + "reflect" + "strconv" + "sync/atomic" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" +) + +func init() { + // Enable klog which is used in dependencies + klog.InitFlags(nil) + flag.Set("logtostderr", "true") + flag.Set("v", "9") +} + +type testObject struct { + resourceVersion string +} + +func (o testObject) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (o testObject) DeepCopyObject() runtime.Object { return o } +func (o testObject) GetResourceVersion() string { return o.resourceVersion } + +func withCounter(w cache.Watcher) (*uint32, cache.Watcher) { + var counter uint32 + return &counter, &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + atomic.AddUint32(&counter, 1) + return w.Watch(options) + }, + } +} + +func makeTestEvent(rv int) watch.Event { + return watch.Event{ + Type: watch.Added, + Object: testObject{ + resourceVersion: fmt.Sprintf("%d", rv), + }, + } +} + +func arrayToChannel(array []watch.Event) chan watch.Event { + ch := make(chan watch.Event, len(array)) + + for _, event := range array { + ch <- event + } + + return ch +} + +// parseResourceVersionOrDie is test-only code that simulates the server and can thus interpret resourceVersion. +func parseResourceVersionOrDie(resourceVersion string) uint64 { + // We can't use etcdstorage.Versioner.ParseResourceVersion() because of import restrictions + + if resourceVersion == "" { + return 0 + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + if err != nil { + panic(fmt.Errorf("failed to parse resourceVersion %q", resourceVersion)) + } + return version +} + +func fromRV(resourceVersion string, array []watch.Event) []watch.Event { + var result []watch.Event + rv := parseResourceVersionOrDie(resourceVersion) + for _, event := range array { + if event.Type == watch.Error { + if len(result) == 0 { + // Skip error events until we find an object matching the RV requirement + continue + } + } else { + rvGetter, ok := event.Object.(resourceVersionGetter) + if ok { + if parseResourceVersionOrDie(rvGetter.GetResourceVersion()) <= rv { + continue + } + } + } + + result = append(result, event) + } + + return result +} + +func closeAfterN(n int, source chan watch.Event) chan watch.Event { + result := make(chan watch.Event, 0) + go func() { + defer close(result) + defer close(source) + for i := 0; i < n; i++ { + result <- <-source + } + }() + return result +} + +type unexpectedError struct { + // Inheriting any struct fulfilling the runtime.Object interface would do.
+ metav1.Status +} + +var _ runtime.Object = &unexpectedError{} + +func TestNewRetryWatcher(t *testing.T) { + tt := []struct { + name string + initialRV string + err error + }{ + { + name: "empty RV should fail", + initialRV: "", + err: errors.New("initial RV \"\" is not supported due to issues with underlying WATCH"), + }, + { + name: "RV \"0\" should fail", + initialRV: "0", + err: errors.New("initial RV \"0\" is not supported due to issues with underlying WATCH"), + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + _, err := NewRetryWatcher(tc.initialRV, nil) + if !reflect.DeepEqual(err, tc.err) { + t.Errorf("Expected error: %v, got: %v", tc.err, err) + } + }) + } +} + +func TestRetryWatcher(t *testing.T) { + tt := []struct { + name string + initialRV string + watchClient cache.Watcher + watchCount uint32 + expected []watch.Event + }{ + { + name: "recovers if watchClient returns error", + initialRV: "1", + watchClient: &cache.ListWatch{ + WatchFunc: func() func(options metav1.ListOptions) (watch.Interface, error) { + firstRun := true + return func(options metav1.ListOptions) (watch.Interface, error) { + if firstRun { + firstRun = false + return nil, fmt.Errorf("test error") + } + + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(2), + }))), nil + } + }(), + }, + watchCount: 2, + expected: []watch.Event{ + makeTestEvent(2), + }, + }, + { + name: "recovers if watchClient returns nil watcher", + initialRV: "1", + watchClient: &cache.ListWatch{ + WatchFunc: func() func(options metav1.ListOptions) (watch.Interface, error) { + firstRun := true + return func(options metav1.ListOptions) (watch.Interface, error) { + if firstRun { + firstRun = false + return nil, nil + } + + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(2), + }))), nil + } + }(), + }, + watchCount: 2, + expected: []watch.Event{ + makeTestEvent(2), + }, + }, + { + name: "works with a valid initialRV set", + initialRV: "1", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(2), + }))), nil + }, + }, + watchCount: 1, + expected: []watch.Event{ + makeTestEvent(2), + }, + }, + { + name: "works with initialRV set, skipping the preceding items but reading those directly following", + initialRV: "1", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(1), + makeTestEvent(2), + }))), nil + }, + }, + watchCount: 1, + expected: []watch.Event{ + makeTestEvent(2), + }, + }, + { + name: "works with initialRV set, skipping the preceding items with none following", + initialRV: "3", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(2), + }))), nil + }, + }, + watchCount: 1, + expected: nil, + }, + { + name: "fails on Gone (RV too old error)", + initialRV: "5", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(5), + makeTestEvent(6), + {Type: watch.Error, Object:
&apierrors.NewGone("").ErrStatus}, + makeTestEvent(7), + makeTestEvent(8), + }))), nil + }, + }, + watchCount: 1, + expected: []watch.Event{ + makeTestEvent(6), + { + Type: watch.Error, + Object: &apierrors.NewGone("").ErrStatus, + }, + }, + }, + { + name: "recovers from timeout error", + initialRV: "5", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(6), + { + Type: watch.Error, + Object: &apierrors.NewTimeoutError("", 0).ErrStatus, + }, + makeTestEvent(7), + }))), nil + }, + }, + watchCount: 2, + expected: []watch.Event{ + makeTestEvent(6), + makeTestEvent(7), + }, + }, + { + name: "recovers from internal server error", + initialRV: "5", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(6), + { + Type: watch.Error, + Object: &apierrors.NewInternalError(errors.New("")).ErrStatus, + }, + makeTestEvent(7), + }))), nil + }, + }, + watchCount: 2, + expected: []watch.Event{ + makeTestEvent(6), + makeTestEvent(7), + }, + }, + { + name: "recovers from unexpected error code", + initialRV: "5", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(6), + { + Type: watch.Error, + Object: &metav1.Status{ + Code: 666, + }, + }, + makeTestEvent(7), + }))), nil + }, + }, + watchCount: 2, + expected: []watch.Event{ + makeTestEvent(6), + makeTestEvent(7), + }, + }, + { + name: "recovers from unexpected error type", + initialRV: "5", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(6), + { + Type: watch.Error, + Object: &unexpectedError{}, + }, + makeTestEvent(7), + }))), nil + }, + }, + watchCount: 2, + expected: []watch.Event{ + makeTestEvent(6), + makeTestEvent(7), + }, + }, + { + name: "survives 1 closed watch and reads 1 item", + initialRV: "5", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(closeAfterN(1, arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(6), + })))), nil + }, + }, + watchCount: 2, + expected: []watch.Event{ + makeTestEvent(6), + }, + }, + { + name: "survives 2 closed watches and reads 2 items", + initialRV: "4", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(closeAfterN(1, arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(5), + makeTestEvent(6), + })))), nil + }, + }, + watchCount: 3, + expected: []watch.Event{ + makeTestEvent(5), + makeTestEvent(6), + }, + }, + { + name: "survives 2 closed watches and reads 2 items for nonconsecutive RVs", + initialRV: "4", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(closeAfterN(1, arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(5), + makeTestEvent(7), + })))), nil + }, + }, + watchCount: 3, + expected: []watch.Event{ + makeTestEvent(5), + 
makeTestEvent(7), + }, + }, + { + name: "survives 2 closed watches and reads 2 items for nonconsecutive RVs starting at much lower RV", + initialRV: "2", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(closeAfterN(1, arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(5), + makeTestEvent(7), + })))), nil + }, + }, + watchCount: 3, + expected: []watch.Event{ + makeTestEvent(5), + makeTestEvent(7), + }, + }, + { + name: "survives 4 closed watches and reads 4 items for nonconsecutive, spread RVs", + initialRV: "2", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(closeAfterN(1, arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(5), + makeTestEvent(6), + makeTestEvent(7), + makeTestEvent(11), + })))), nil + }, + }, + watchCount: 5, + expected: []watch.Event{ + makeTestEvent(5), + makeTestEvent(6), + makeTestEvent(7), + makeTestEvent(11), + }, + }, + { + name: "survives 4 closed watches and reads 4 items for nonconsecutive, spread RVs and skips those with lower or equal RV", + initialRV: "2", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(closeAfterN(1, arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(1), + makeTestEvent(2), + makeTestEvent(5), + makeTestEvent(6), + makeTestEvent(7), + makeTestEvent(11), + })))), nil + }, + }, + watchCount: 5, + expected: []watch.Event{ + makeTestEvent(5), + makeTestEvent(6), + makeTestEvent(7), + makeTestEvent(11), + }, + }, + { + name: "survives 2 closed watches and reads 2+2+1 items skipping those with equal RV", + initialRV: "1", + watchClient: &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(closeAfterN(2, arrayToChannel(fromRV(options.ResourceVersion, []watch.Event{ + makeTestEvent(1), + makeTestEvent(2), + makeTestEvent(5), + makeTestEvent(6), + makeTestEvent(7), + makeTestEvent(11), + })))), nil + }, + }, + watchCount: 3, + expected: []watch.Event{ + makeTestEvent(2), + makeTestEvent(5), + makeTestEvent(6), + makeTestEvent(7), + makeTestEvent(11), + }, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + atomicCounter, watchFunc := withCounter(tc.watchClient) + watcher, err := newRetryWatcher(tc.initialRV, watchFunc, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create a RetryWatcher: %v", err) + } + defer func() { + watcher.Stop() + t.Log("Waiting on RetryWatcher to stop...") + <-watcher.Done() + }() + + var got []watch.Event + for i := 0; i < len(tc.expected); i++ { + event, ok := <-watcher.ResultChan() + if !ok { + t.Error(spew.Errorf("expected event %#+v, but channel is closed", tc.expected[i])) + break + } + + got = append(got, event) + } + + // (Sanity check, best effort) Make sure there are no more events to be received. + // RetryWatcher proxies the source channel, so we can't try reading it immediately + // but have to tolerate some delay. Given this is best-effort detection, we can use a short duration. + // It also makes sure that for 0 events the watchFunc has time to be called.
+ select { + case event, ok := <-watcher.ResultChan(): + if ok { + t.Error(spew.Errorf("Unexpected event received after reading all the expected ones: %#+v", event)) + } + case <-time.After(10 * time.Millisecond): + break + } + + counter := atomic.LoadUint32(atomicCounter) + if counter != tc.watchCount { + t.Errorf("expected %d watcher starts, but it has started %d times", tc.watchCount, counter) + } + + if !reflect.DeepEqual(tc.expected, got) { + t.Fatal(spew.Errorf("expected %#+v, got %#+v;\ndiff: %s", tc.expected, got, diff.ObjectReflectDiff(tc.expected, got))) + } + }) + } +} + +func TestRetryWatcherToFinishWithUnreadEvents(t *testing.T) { + watcher, err := NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return watch.NewProxyWatcher(arrayToChannel([]watch.Event{ + makeTestEvent(2), + })), nil + }, + }) + if err != nil { + t.Fatalf("failed to create a RetryWatcher: %v", err) + } + + // Give the watcher a chance to get to sending events (blocking) + time.Sleep(10 * time.Millisecond) + + watcher.Stop() + + select { + case <-watcher.Done(): + break + case <-time.After(10 * time.Millisecond): + t.Error("Failed to close the watcher") + } + + // RetryWatcher result channel should be closed + _, ok := <-watcher.ResultChan() + if ok { + t.Error("ResultChan is not closed") + } +} diff --git a/staging/src/k8s.io/client-go/tools/watch/until.go b/staging/src/k8s.io/client-go/tools/watch/until.go index 541343711d..e12d82aca4 100644 --- a/staging/src/k8s.io/client-go/tools/watch/until.go +++ b/staging/src/k8s.io/client-go/tools/watch/until.go @@ -95,6 +95,25 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions return lastEvent, nil } +// Until wraps the watcherClient's watch function with a RetryWatcher, making sure that the watcher gets restarted in case of errors. +// The initialResourceVersion will be given to the watch method when first called. It shall not be "" or "0" +// given the underlying WATCH call issues (#74022). If you want the initial list ("", "0") done for you use ListWatchUntil instead. +// Remaining behaviour is identical to function UntilWithoutRetry. (See above.) +// Until can deal with API timeouts and lost connections. +// It guarantees that you will see all events, in the order they happened. +// Due to this guarantee it cannot deal with 'resource version too old' errors and will fail in that case. +// (See `UntilWithSync` if you'd prefer to recover from all errors, including RV too old, by re-listing +// those items. In normal code you should prefer being level driven, in which case missing some edges is not a concern.) +// The most frequent usage for Until would be a test where you want to verify the exact order of events ("edges"). +func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) { + w, err := NewRetryWatcher(initialResourceVersion, watcherClient) + if err != nil { + return nil, err + } + + return UntilWithoutRetry(ctx, w, conditions...) +} + +// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced, +// and watches the output until each provided condition succeeds, in a way that is identical +// to function UntilWithoutRetry. (See above.)
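The new Until builds directly on RetryWatcher. A sketch of the typical pattern, waiting for a pod to reach Running; waitForPodRunning is an illustrative helper, and listRV is assumed to come from a preceding LIST call, since Until rejects "" and "0":

```go
package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodRunning blocks until the named pod reports Running, surviving
// dropped watch connections via the RetryWatcher underneath Until.
func waitForPodRunning(client kubernetes.Interface, namespace, name, listRV string, timeout time.Duration) (*watch.Event, error) {
	lw := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = "metadata.name=" + name
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	return watchtools.Until(ctx, listRV, lw, func(event watch.Event) (bool, error) {
		pod, ok := event.Object.(*corev1.Pod)
		if !ok {
			return false, nil
		}
		return pod.Status.Phase == corev1.PodRunning, nil
	})
}
```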
@@ -149,13 +168,14 @@ func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) ( return context.WithTimeout(parent, timeout) } -// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout -// if timeout is exceeded without all conditions returning true, or an error if an error occurs. -// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until. -// TODO: remove when no longer used -// -// Deprecated: Use UntilWithSync instead. -func ListWatchUntil(timeout time.Duration, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) { +// ListWatchUntil first lists objects, converts them into synthetic ADDED events +// and checks conditions for those synthetic events. If the conditions have not been reached so far +// it continues by calling Until which establishes a watch from resourceVersion of the list call +// to evaluate those conditions based on new events. +// ListWatchUntil provides the same guarantees as Until and replaces the old WATCH from RV "" (or "0") +// which was mixing list and watch calls internally and having severe design issues. (see #74022) +// There is no resourceVersion order guarantee for the initial list and those synthetic events. +func ListWatchUntil(ctx context.Context, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) { if len(conditions) == 0 { return nil, nil } @@ -212,17 +232,5 @@ func ListWatchUntil(timeout time.Duration, lw cache.ListerWatcher, conditions .. } currResourceVersion := metaObj.GetResourceVersion() - watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion}) - if err != nil { - return nil, err - } - - ctx, cancel := ContextWithOptionalTimeout(context.Background(), timeout) - defer cancel() - evt, err := UntilWithoutRetry(ctx, watchInterface, remainingConditions...) - if err == ErrWatchClosed { - // present a consistent error interface to callers - err = wait.ErrWaitTimeout - } - return evt, err + return Until(ctx, currResourceVersion, lw, remainingConditions...) } diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go index 72931e51ed..c2052dfd40 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go @@ -28,8 +28,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ExampleV1() examplev1.ExampleV1Interface - // Deprecated: please explicitly pick a version if possible. - Example() examplev1.ExampleV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -44,12 +42,6 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return c.exampleV1 } -// Deprecated: Example retrieves the default version of ExampleClient. -// Please explicitly pick a version. 
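Returning to the ListWatchUntil rework above: callers now pass a context instead of a timeout, and the list-then-watch handoff happens internally via Until. A minimal sketch under the same assumptions; waitForAnyPod is an illustrative name:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForAnyPod lists pods first (delivered as synthetic ADDED events) and,
// if no condition matches, watches on from the list's resourceVersion.
func waitForAnyPod(ctx context.Context, client kubernetes.Interface, namespace string) (*watch.Event, error) {
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}

	// The caller bounds the wait through ctx, e.g. context.WithTimeout,
	// replacing the removed timeout parameter.
	return watchtools.ListWatchUntil(ctx, lw, func(event watch.Event) (bool, error) {
		return event.Type == watch.Added, nil
	})
}
```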
-func (c *Clientset) Example() examplev1.ExampleV1Interface { - return c.exampleV1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go index 9a7307606c..6851148d5a 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go @@ -75,8 +75,3 @@ var _ clientset.Interface = &Clientset{} func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} } - -// Example retrieves the ExampleV1Client -func (c *Clientset) Example() examplev1.ExampleV1Interface { - return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go index 8ba9799f5b..7039a827b5 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go @@ -29,11 +29,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ExampleV1() examplev1.ExampleV1Interface - // Deprecated: please explicitly pick a version if possible. - Example() examplev1.ExampleV1Interface SecondExampleV1() secondexamplev1.SecondExampleV1Interface - // Deprecated: please explicitly pick a version if possible. - SecondExample() secondexamplev1.SecondExampleV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -49,23 +45,11 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return c.exampleV1 } -// Deprecated: Example retrieves the default version of ExampleClient. -// Please explicitly pick a version. -func (c *Clientset) Example() examplev1.ExampleV1Interface { - return c.exampleV1 -} - // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return c.secondExampleV1 } -// Deprecated: SecondExample retrieves the default version of SecondExampleClient. -// Please explicitly pick a version. 
-func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { - return c.secondExampleV1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go index 6dce4fed44..22ff8e6604 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go @@ -78,17 +78,7 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} } -// Example retrieves the ExampleV1Client -func (c *Clientset) Example() examplev1.ExampleV1Interface { - return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} -} - // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} } - -// SecondExample retrieves the SecondExampleV1Client -func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { - return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go index 034e06ad06..812dbd1246 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go @@ -29,11 +29,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ExampleV1() examplev1.ExampleV1Interface - // Deprecated: please explicitly pick a version if possible. - Example() examplev1.ExampleV1Interface SecondExampleV1() secondexamplev1.SecondExampleV1Interface - // Deprecated: please explicitly pick a version if possible. - SecondExample() secondexamplev1.SecondExampleV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -49,23 +45,11 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return c.exampleV1 } -// Deprecated: Example retrieves the default version of ExampleClient. -// Please explicitly pick a version. -func (c *Clientset) Example() examplev1.ExampleV1Interface { - return c.exampleV1 -} - // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return c.secondExampleV1 } -// Deprecated: SecondExample retrieves the default version of SecondExampleClient. -// Please explicitly pick a version. 
-func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { - return c.secondExampleV1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go index 4ad8bba757..3a80679be7 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go @@ -78,17 +78,7 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} } -// Example retrieves the ExampleV1Client -func (c *Clientset) Example() examplev1.ExampleV1Interface { - return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} -} - // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} } - -// SecondExample retrieves the SecondExampleV1Client -func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { - return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index 61b3334f40..80e4361b77 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -102,10 +102,6 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr } sw.Do(clientsetInterfaceImplTemplate, m) - // don't generated the default method if generating internalversion clientset - if group.IsDefaultVersion && group.Version != "" { - sw.Do(clientsetInterfaceDefaultVersionImpl, m) - } } return sw.Error() diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index 6fdb29a94a..a1e67dcbdf 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -88,10 +88,6 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr sw.Do(clientsetTemplate, m) for _, g := range allGroups { sw.Do(clientsetInterfaceImplTemplate, g) - // don't generated the default method if generating internalversion clientset - if g.IsDefaultVersion && g.Version != "" { - sw.Do(clientsetInterfaceDefaultVersionImpl, g) - } } sw.Do(getDiscoveryTemplate, m) sw.Do(newClientsetForConfigTemplate, m) @@ -105,9 +101,7 @@ var clientsetInterface = ` type Interface interface { Discovery() $.DiscoveryInterface|raw$ $range .allGroups$$.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface - $if .IsDefaultVersion$// Deprecated: please explicitly pick a version if possible. 
- $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface - $end$$end$ + $end$ } ` @@ -128,14 +122,6 @@ func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.V } ` -var clientsetInterfaceDefaultVersionImpl = ` -// Deprecated: $.GroupGoName$ retrieves the default version of $.GroupGoName$Client. -// Please explicitly pick a version. -func (c *Clientset) $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { - return c.$.LowerCaseGroupGoName$$.Version$ -} -` - var getDiscoveryTemplate = ` // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() $.DiscoveryInterface|raw$ { diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go index 33e6ac451b..59f2fd4449 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -73,7 +73,7 @@ func (a sortableSliceOfVersions) Less(i, j int) bool { } // Determine the default version among versions. If a user calls a group client -// without specifying the version (e.g., c.Core(), instead of c.CoreV1()), the +// without specifying the version, the default version will be returned. func defaultVersion(versions []PackageVersion) Version { var versionStrings []string @@ -88,14 +88,12 @@ func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo { var groupVersionPackages []GroupVersionInfo for _, group := range groups { - defaultVersion := defaultVersion(group.Versions) for _, version := range group.Versions { groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}] groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{ Group: Group(namer.IC(group.Group.NonEmpty())), Version: Version(namer.IC(version.Version.String())), PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()), - IsDefaultVersion: version.Version == defaultVersion && version.Version != "", GroupGoName: groupGoName, LowerCaseGroupGoName: namer.IL(groupGoName), }) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go b/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go index 17fd6e92a7..7d1606c508 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -62,11 +62,8 @@ type GroupVersions struct { // GroupVersionInfo contains all the info around a group version. type GroupVersionInfo struct { - Group Group - Version Version - // If a user calls a group client without specifying the version (e.g., - // c.Core(), instead of c.CoreV1()), the
- IsDefaultVersion bool + Group Group + Version Version PackageAlias string GroupGoName string LowerCaseGroupGoName string diff --git a/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/clientset.go b/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/clientset.go index 9d9099a7fe..4a3cbf5036 100644 --- a/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/clientset.go @@ -28,8 +28,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface CsiV1alpha1() csiv1alpha1.CsiV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Csi() csiv1alpha1.CsiV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -44,12 +42,6 @@ func (c *Clientset) CsiV1alpha1() csiv1alpha1.CsiV1alpha1Interface { return c.csiV1alpha1 } -// Deprecated: Csi retrieves the default version of CsiClient. -// Please explicitly pick a version. -func (c *Clientset) Csi() csiv1alpha1.CsiV1alpha1Interface { - return c.csiV1alpha1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake/clientset_generated.go index 6cc8be08b2..baa4a4583b 100644 --- a/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -75,8 +75,3 @@ var _ clientset.Interface = &Clientset{} func (c *Clientset) CsiV1alpha1() csiv1alpha1.CsiV1alpha1Interface { return &fakecsiv1alpha1.FakeCsiV1alpha1{Fake: &c.Fake} } - -// Csi retrieves the CsiV1alpha1Client -func (c *Clientset) Csi() csiv1alpha1.CsiV1alpha1Interface { - return &fakecsiv1alpha1.FakeCsiV1alpha1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go index 8cf29e4368..f0b0b670ed 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go @@ -30,8 +30,6 @@ type Interface interface { Discovery() discovery.DiscoveryInterface ApiregistrationV1beta1() apiregistrationv1beta1.ApiregistrationV1beta1Interface ApiregistrationV1() apiregistrationv1.ApiregistrationV1Interface - // Deprecated: please explicitly pick a version if possible. - Apiregistration() apiregistrationv1.ApiregistrationV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -52,12 +50,6 @@ func (c *Clientset) ApiregistrationV1() apiregistrationv1.ApiregistrationV1Inter return c.apiregistrationV1 } -// Deprecated: Apiregistration retrieves the default version of ApiregistrationClient. -// Please explicitly pick a version. 
-func (c *Clientset) Apiregistration() apiregistrationv1.ApiregistrationV1Interface { - return c.apiregistrationV1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go index b74f701a5f..c522bf9956 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ -82,8 +82,3 @@ func (c *Clientset) ApiregistrationV1beta1() apiregistrationv1beta1.Apiregistrat func (c *Clientset) ApiregistrationV1() apiregistrationv1.ApiregistrationV1Interface { return &fakeapiregistrationv1.FakeApiregistrationV1{Fake: &c.Fake} } - -// Apiregistration retrieves the ApiregistrationV1Client -func (c *Clientset) Apiregistration() apiregistrationv1.ApiregistrationV1Interface { - return &fakeapiregistrationv1.FakeApiregistrationV1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go index c36527b107..539641ea93 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go @@ -30,7 +30,7 @@ var ( ) unavailableGauge = prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Name: "aggregator_unavailable_apiserver_gauge", + Name: "aggregator_unavailable_apiservice", Help: "Gauge of APIServices which are marked as unavailable broken down by APIService name.", }, []string{"name"}, diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go index 309b3207d1..ef64f63731 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go @@ -30,8 +30,6 @@ type Interface interface { Discovery() discovery.DiscoveryInterface MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Metrics() metricsv1beta1.MetricsV1beta1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -52,12 +50,6 @@ func (c *Clientset) MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface { return c.metricsV1beta1 } -// Deprecated: Metrics retrieves the default version of MetricsClient. -// Please explicitly pick a version. 
-func (c *Clientset) Metrics() metricsv1beta1.MetricsV1beta1Interface { - return c.metricsV1beta1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/clientset_generated.go index aa066c49a3..6114ff7b98 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -82,8 +82,3 @@ func (c *Clientset) MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface { func (c *Clientset) MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface { return &fakemetricsv1beta1.FakeMetricsV1beta1{Fake: &c.Fake} } - -// Metrics retrieves the MetricsV1beta1Client -func (c *Clientset) Metrics() metricsv1beta1.MetricsV1beta1Interface { - return &fakemetricsv1beta1.FakeMetricsV1beta1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/clientset.go b/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/clientset.go index dac1ab38bf..4f38604723 100644 --- a/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/clientset.go @@ -28,8 +28,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Node() nodev1alpha1.NodeV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -44,12 +42,6 @@ func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { return c.nodeV1alpha1 } -// Deprecated: Node retrieves the default version of NodeClient. -// Please explicitly pick a version. 
-func (c *Clientset) Node() nodev1alpha1.NodeV1alpha1Interface { - return c.nodeV1alpha1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/fake/clientset_generated.go index 1126d72d94..0c213cd43a 100644 --- a/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/node-api/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -75,8 +75,3 @@ var _ clientset.Interface = &Clientset{} func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { return &fakenodev1alpha1.FakeNodeV1alpha1{Fake: &c.Fake} } - -// Node retrieves the NodeV1alpha1Client -func (c *Clientset) Node() nodev1alpha1.NodeV1alpha1Interface { - return &fakenodev1alpha1.FakeNodeV1alpha1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/clientset.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/clientset.go index d22823aab9..e8723c4d4d 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/clientset.go @@ -30,8 +30,6 @@ type Interface interface { Discovery() discovery.DiscoveryInterface WardleV1alpha1() wardlev1alpha1.WardleV1alpha1Interface WardleV1beta1() wardlev1beta1.WardleV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Wardle() wardlev1beta1.WardleV1beta1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -52,12 +50,6 @@ func (c *Clientset) WardleV1beta1() wardlev1beta1.WardleV1beta1Interface { return c.wardleV1beta1 } -// Deprecated: Wardle retrieves the default version of WardleClient. -// Please explicitly pick a version. -func (c *Clientset) Wardle() wardlev1beta1.WardleV1beta1Interface { - return c.wardleV1beta1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/clientset_generated.go index 3a79d302eb..5c746a80d2 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -82,8 +82,3 @@ func (c *Clientset) WardleV1alpha1() wardlev1alpha1.WardleV1alpha1Interface { func (c *Clientset) WardleV1beta1() wardlev1beta1.WardleV1beta1Interface { return &fakewardlev1beta1.FakeWardleV1beta1{Fake: &c.Fake} } - -// Wardle retrieves the WardleV1beta1Client -func (c *Clientset) Wardle() wardlev1beta1.WardleV1beta1Interface { - return &fakewardlev1beta1.FakeWardleV1beta1{Fake: &c.Fake} -} diff --git a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json index 31e32ef6d7..59c363e673 100644 --- a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json @@ -6,14 +6,50 @@ "./..." 
], "Deps": [ + { + "ImportPath": "github.com/PuerkitoBio/purell", + "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" + }, + { + "ImportPath": "github.com/PuerkitoBio/urlesc", + "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" + }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, + { + "ImportPath": "github.com/emicklei/go-restful", + "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" + }, + { + "ImportPath": "github.com/emicklei/go-restful/log", + "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" + }, { "ImportPath": "github.com/evanphx/json-patch", "Rev": "5858425f75500d40c52783dce87d085a483ce135" }, + { + "ImportPath": "github.com/ghodss/yaml", + "Rev": "c7ce16629ff4cd059ed96ed06419dd3856fd3577" + }, + { + "ImportPath": "github.com/go-openapi/jsonpointer", + "Rev": "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" + }, + { + "ImportPath": "github.com/go-openapi/jsonreference", + "Rev": "8483a886a90412cd6858df4ea3483dce9c8e35a3" + }, + { + "ImportPath": "github.com/go-openapi/spec", + "Rev": "5bae59e25b21498baea7f9d46e9c147ec106a42e" + }, + { + "ImportPath": "github.com/go-openapi/swag", + "Rev": "5899d5c5e619fda5fa86e14795a835f473ca284c" + }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" @@ -82,6 +118,18 @@ "ImportPath": "github.com/json-iterator/go", "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, + { + "ImportPath": "github.com/mailru/easyjson/buffer", + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" + }, + { + "ImportPath": "github.com/mailru/easyjson/jlexer", + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" + }, + { + "ImportPath": "github.com/mailru/easyjson/jwriter", + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" + }, { "ImportPath": "github.com/modern-go/concurrent", "Rev": "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" @@ -94,6 +142,10 @@ "ImportPath": "github.com/peterbourgon/diskv", "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" }, + { + "ImportPath": "github.com/pkg/errors", + "Rev": "645ef00459ed84a119197bfb8d8205042c6df63d" + }, { "ImportPath": "github.com/spf13/cobra", "Rev": "c439c4fa093711d42e1b01acb1235b52004753c1" @@ -142,6 +194,10 @@ "ImportPath": "golang.org/x/sys/windows", "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, + { + "ImportPath": "golang.org/x/text/cases", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, { "ImportPath": "golang.org/x/text/encoding", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -158,10 +214,22 @@ "ImportPath": "golang.org/x/text/encoding/unicode", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "golang.org/x/text/internal", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/internal/tag", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, { "ImportPath": "golang.org/x/text/internal/utf8internal", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "golang.org/x/text/language", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, { "ImportPath": "golang.org/x/text/runes", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -170,6 +238,10 @@ "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "golang.org/x/text/secure/precis", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -182,6 +254,10 @@ "ImportPath": 
"golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "golang.org/x/text/width", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, { "ImportPath": "golang.org/x/time/rate", "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" @@ -330,6 +406,10 @@ "ImportPath": "k8s.io/api/storage/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/api/equality", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/api/errors", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -342,6 +422,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/api/resource", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/api/validation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -354,6 +438,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/validation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -434,6 +522,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/util/json", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/util/naming", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -450,6 +542,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/util/sets", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -470,6 +566,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/watch", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -486,6 +586,42 @@ "ImportPath": "k8s.io/cli-runtime/pkg/genericclioptions/resource", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize/k8sdeps", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": 
"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/discovery", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -590,10 +726,106 @@ "ImportPath": "k8s.io/klog", "Rev": "8139d8cb77af419532b33dfa7dd09fbc5f1d344f" }, + { + "ImportPath": "k8s.io/kube-openapi/pkg/common", + "Rev": "d7c86cdc46e3a4fcf892b32dd7bc3aa775e0870e" + }, + { + "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", + "Rev": "d7c86cdc46e3a4fcf892b32dd7bc3aa775e0870e" + }, { "ImportPath": "k8s.io/utils/integer", "Rev": "c2654d5206da6b7b6ace12841e8f359bb89b443c" }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/commands/build", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/constants", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/expansion", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/factory", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/fs", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/git", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/gvk", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/ifc", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/ifc/transformer", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/image", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/internal/error", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/loader", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/patch", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/patch/transformer", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/resid", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/resmap", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/resource", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/target", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/transformers", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/transformers/config", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, + { + "ImportPath": "sigs.k8s.io/kustomize/pkg/types", + "Rev": "ce7e5ee2c30cc5856fea01fe423cf167f2a2d0c3" + }, { "ImportPath": "sigs.k8s.io/yaml", "Rev": "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" diff --git a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/clientset.go 
b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/clientset.go index 9dd86db1f3..9e1bd6319e 100644 --- a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/clientset.go @@ -28,8 +28,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface SamplecontrollerV1alpha1() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Samplecontroller() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -44,12 +42,6 @@ func (c *Clientset) SamplecontrollerV1alpha1() samplecontrollerv1alpha1.Sampleco return c.samplecontrollerV1alpha1 } -// Deprecated: Samplecontroller retrieves the default version of SamplecontrollerClient. -// Please explicitly pick a version. -func (c *Clientset) Samplecontroller() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface { - return c.samplecontrollerV1alpha1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/clientset_generated.go index 6bed11af72..c16cfb3f52 100644 --- a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -75,8 +75,3 @@ var _ clientset.Interface = &Clientset{} func (c *Clientset) SamplecontrollerV1alpha1() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface { return &fakesamplecontrollerv1alpha1.FakeSamplecontrollerV1alpha1{Fake: &c.Fake} } - -// Samplecontroller retrieves the SamplecontrollerV1alpha1Client -func (c *Clientset) Samplecontroller() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface { - return &fakesamplecontrollerv1alpha1.FakeSamplecontrollerV1alpha1{Fake: &c.Fake} -} diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh index bcca957d26..b78e00cedd 100755 --- a/test/cmd/apply.sh +++ b/test/cmd/apply.sh @@ -223,6 +223,21 @@ __EOF__ # cleanup kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]}" + ## kubectl apply -k somedir + kubectl apply -k hack/testdata/kustomize + kube::test::get_object_assert 'configmap test-the-map' "{{${id_field}}}" 'test-the-map' + kube::test::get_object_assert 'deployment test-the-deployment' "{{${id_field}}}" 'test-the-deployment' + kube::test::get_object_assert 'service test-the-service' "{{${id_field}}}" 'test-the-service' + # cleanup + kubectl delete -k hack/testdata/kustomize + + ## kubectl apply --kustomize somedir + kubectl apply --kustomize hack/testdata/kustomize + kube::test::get_object_assert 'configmap test-the-map' "{{${id_field}}}" 'test-the-map' + kube::test::get_object_assert 'deployment test-the-deployment' "{{${id_field}}}" 'test-the-deployment' + kube::test::get_object_assert 'service test-the-service' "{{${id_field}}}" 'test-the-service' + # cleanup + kubectl delete --kustomize hack/testdata/kustomize set +o nounset set +o errexit diff --git a/test/cmd/create.sh b/test/cmd/create.sh index 5792883f56..e21b04d8cf 100755 --- a/test/cmd/create.sh +++ b/test/cmd/create.sh @@ -108,3 +108,28 @@ run_create_job_tests() { set +o nounset set +o errexit } 
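+# Note: this test assumes hack/testdata/kustomize contains a kustomization.yaml
+# that generates the test-the-map ConfigMap and includes the test-the-deployment
+# and test-the-service manifests asserted on below.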
+ +run_kubectl_create_kustomization_directory_tests() { + set -o nounset + set -o errexit + + ## kubectl create -k for kustomization directory + # Pre-condition: no ConfigMap, Deployment, Service exist + kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -k hack/testdata/kustomize + # Post-condition: test-the-map, test-the-deployment, test-the-service exist + + # Check that all items in the list are printed + kube::test::get_object_assert 'configmap test-the-map' "{{${id_field}}}" 'test-the-map' + kube::test::get_object_assert 'deployment test-the-deployment' "{{${id_field}}}" 'test-the-deployment' + kube::test::get_object_assert 'service test-the-service' "{{${id_field}}}" 'test-the-service' + + # cleanup + kubectl delete -k hack/testdata/kustomize + + set +o nounset + set +o errexit +} \ No newline at end of file diff --git a/test/cmd/get.sh b/test/cmd/get.sh index f14e65cb80..936abb6f57 100755 --- a/test/cmd/get.sh +++ b/test/cmd/get.sh @@ -202,6 +202,29 @@ run_kubectl_get_tests() { # cleanup kubectl delete pods redis-master valid-pod "${kube_flags[@]}" + ### Test 'kubectl get -k ' prints all the items built from a kustomization directory + # Pre-condition: no ConfigMap, Deployment, Service exist + kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl apply -k hack/testdata/kustomize + # Post-condition: test-the-map, test-the-deployment, test-the-service exist + + # Check that all items in the list are printed + output_message=$(kubectl get -k hack/testdata/kustomize -o jsonpath="{..metadata.name}" "${kube_flags[@]}") + kube::test::if_has_string "${output_message}" "test-the-map" + kube::test::if_has_string "${output_message}" "test-the-deployment" + kube::test::if_has_string "${output_message}" "test-the-service" + + # cleanup + kubectl delete -k hack/testdata/kustomize + + # Check that all items in the list are deleted + kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" '' + set +o nounset set +o errexit } diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh index bf3e871bef..b84ea798dd 100755 --- a/test/cmd/legacy-script.sh +++ b/test/cmd/legacy-script.sh @@ -510,6 +510,9 @@ runTests() { if kube::test::if_supports_resource "${secrets}" ; then record_command run_create_secret_tests fi + if kube::test::if_supports_resource "${deployments}"; then + record_command run_kubectl_create_kustomization_directory_tests + fi ###################### # Delete # diff --git a/test/conformance/testdata/conformance.txt b/test/conformance/testdata/conformance.txt index ddbec39ed0..1881ea19a3 100755 --- a/test/conformance/testdata/conformance.txt +++ b/test/conformance/testdata/conformance.txt @@ -179,6 +179,7 @@ test/e2e/kubectl/kubectl.go: "should create a job from an image, then delete the test/e2e/kubectl/kubectl.go: "should support proxy with --port 0" test/e2e/kubectl/kubectl.go: "should support --unix-socket=/path" test/e2e/network/dns.go: "should 
provide DNS for the cluster" +test/e2e/network/dns.go: "should provide /etc/hosts entries for the cluster" test/e2e/network/dns.go: "should provide DNS for services" test/e2e/network/proxy.go: "should proxy logs on node with explicit kubelet port using proxy subresource" test/e2e/network/proxy.go: "should proxy logs on node using proxy subresource" diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index c0a3545f4e..0bb69bbd02 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -664,7 +664,51 @@ var _ = SIGDescribe("ResourceQuota", func() { err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) }) + It("Should be able to update and delete ResourceQuota.", func() { + client := f.ClientSet + ns := f.Namespace.Name + By("Creating a ResourceQuota") + quotaName := "test-quota" + resourceQuota := &v1.ResourceQuota{ + Spec: v1.ResourceQuotaSpec{ + Hard: v1.ResourceList{}, + }, + } + resourceQuota.ObjectMeta.Name = quotaName + resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("1") + resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("500Mi") + _, err := createResourceQuota(client, ns, resourceQuota) + Expect(err).NotTo(HaveOccurred()) + + By("Getting a ResourceQuota") + resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(resourceQuotaResult.Spec.Hard[v1.ResourceCPU]).To(Equal(resource.MustParse("1"))) + Expect(resourceQuotaResult.Spec.Hard[v1.ResourceMemory]).To(Equal(resource.MustParse("500Mi"))) + + By("Updating a ResourceQuota") + resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2") + resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi") + resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(resourceQuota) + Expect(err).NotTo(HaveOccurred()) + Expect(resourceQuotaResult.Spec.Hard[v1.ResourceCPU]).To(Equal(resource.MustParse("2"))) + Expect(resourceQuotaResult.Spec.Hard[v1.ResourceMemory]).To(Equal(resource.MustParse("1Gi"))) + + By("Verifying a ResourceQuota was modified") + resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(resourceQuotaResult.Spec.Hard[v1.ResourceCPU]).To(Equal(resource.MustParse("2"))) + Expect(resourceQuotaResult.Spec.Hard[v1.ResourceMemory]).To(Equal(resource.MustParse("1Gi"))) + + By("Deleting a ResourceQuota") + err = deleteResourceQuota(client, ns, quotaName) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying the deleted ResourceQuota") + _, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + Expect(errors.IsNotFound(err)).To(Equal(true)) + }) }) var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 92de1b4109..09b5d2c165 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -64,6 +64,7 @@ const ( dummyValidatingWebhookConfigName = "e2e-test-dummy-validating-webhook-config" dummyMutatingWebhookConfigName = "e2e-test-dummy-mutating-webhook-config" crdWebhookConfigName = "e2e-test-webhook-config-crd" + slowWebhookConfigName = "e2e-test-webhook-config-slow" skipNamespaceLabelKey = "skip-webhook-admission" skipNamespaceLabelValue = "yes" @@ -201,6 +202,31 @@ var _ = 
SIGDescribe("AdmissionWebhook", func() { testCRDDenyWebhook(f) }) + It("Should honor timeout", func() { + policyFail := v1beta1.Fail + policyIgnore := v1beta1.Ignore + + By("Setting timeout (1s) shorter than webhook latency (5s)") + slowWebhookCleanup := registerSlowWebhook(f, context, &policyFail, int32Ptr(1)) + testSlowWebhookTimeoutFailEarly(f) + slowWebhookCleanup() + + By("Having no error when timeout is shorter than webhook latency and failure policy is ignore") + slowWebhookCleanup = registerSlowWebhook(f, context, &policyIgnore, int32Ptr(1)) + testSlowWebhookTimeoutNoError(f) + slowWebhookCleanup() + + By("Having no error when timeout is longer than webhook latency") + slowWebhookCleanup = registerSlowWebhook(f, context, &policyFail, int32Ptr(10)) + testSlowWebhookTimeoutNoError(f) + slowWebhookCleanup() + + By("Having no error when timeout is empty (defaulted to 10s in v1beta1)") + slowWebhookCleanup = registerSlowWebhook(f, context, &policyFail, nil) + testSlowWebhookTimeoutNoError(f) + slowWebhookCleanup() + }) + // TODO: add more e2e tests for mutating webhooks // 1. mutating webhook that mutates pod // 2. mutating webhook that sends empty patch @@ -357,6 +383,8 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert func strPtr(s string) *string { return &s } +func int32Ptr(i int32) *int32 { return &i } + func registerWebhook(f *framework.Framework, context *certContext) func() { client := f.ClientSet By("Registering the webhook via the AdmissionRegistration API") @@ -1445,3 +1473,69 @@ func testCRDDenyWebhook(f *framework.Framework) { framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error()) } } + +func registerSlowWebhook(f *framework.Framework, context *certContext, policy *v1beta1.FailurePolicyType, timeout *int32) func() { + client := f.ClientSet + By("Registering slow webhook via the AdmissionRegistration API") + + namespace := f.Namespace.Name + configName := slowWebhookConfigName + + _, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: configName, + }, + Webhooks: []v1beta1.Webhook{ + { + Name: "allow-configmap-with-delay-webhook.k8s.io", + Rules: []v1beta1.RuleWithOperations{{ + Operations: []v1beta1.OperationType{v1beta1.Create}, + Rule: v1beta1.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"configmaps"}, + }, + }}, + ClientConfig: v1beta1.WebhookClientConfig{ + Service: &v1beta1.ServiceReference{ + Namespace: namespace, + Name: serviceName, + Path: strPtr("/always-allow-delay-5s"), + }, + CABundle: context.signingCert, + }, + FailurePolicy: policy, + TimeoutSeconds: timeout, + }, + }, + }) + framework.ExpectNoError(err, "registering slow webhook config %s with namespace %s", configName, namespace) + + // The webhook configuration is honored in 10s. 
+ time.Sleep(10 * time.Second) + + return func() { + client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil) + } +} + +func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { + By("Request fails when timeout (1s) is shorter than slow webhook latency (5s)") + client := f.ClientSet + name := "e2e-test-slow-webhook-configmap" + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) + Expect(err).To(HaveOccurred(), "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name) + expectedErrMsg := `/always-allow-delay-5s?timeout=1s: context deadline exceeded` + if !strings.Contains(err.Error(), expectedErrMsg) { + framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error()) + } +} + +func testSlowWebhookTimeoutNoError(f *framework.Framework) { + client := f.ClientSet + name := "e2e-test-slow-webhook-configmap" + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) + Expect(err).To(BeNil()) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil()) +} diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 6acfc57d84..3008ae47b9 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -93,7 +93,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { f = framework.NewDefaultFramework("daemonsets") - image := framework.ServeHostnameImage + image := NginxImage dsName := "daemon-set" var ns string @@ -350,8 +350,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) Expect(err).NotTo(HaveOccurred()) + // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. + // Get the number of nodes, and set the timeout appropriately. + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + nodeCount := len(nodes.Items) + retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second + By("Check that daemon pods images are updated.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) + err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) Expect(err).NotTo(HaveOccurred()) By("Check that daemon pods are still running on every node of the cluster.") diff --git a/test/e2e/cloud/framework.go b/test/e2e/cloud/framework.go index 6f6ae11669..16c8e7ef1f 100644 --- a/test/e2e/cloud/framework.go +++ b/test/e2e/cloud/framework.go @@ -18,6 +18,7 @@ package cloud import "k8s.io/kubernetes/test/e2e/framework" +// SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return framework.KubeDescribe("[sig-cloud-provider] "+text, body) } diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go index a3e8e0315a..8d097082ea 100644 --- a/test/e2e/cloud/nodes.go +++ b/test/e2e/cloud/nodes.go @@ -24,23 +24,23 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { f := framework.NewDefaultFramework("cloudprovider") var c clientset.Interface - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Only supported in AWS/GCE because those are the only cloud providers // where E2E test are currently running. framework.SkipUnlessProviderIs("aws", "gce", "gke") c = f.ClientSet }) - It("should be deleted on API server if it doesn't exist in the cloud provider", func() { - By("deleting a node on the cloud provider") + ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func() { + ginkgo.By("deleting a node on the cloud provider") nodeDeleteCandidates := framework.GetReadySchedulableNodesOrDie(c) nodeToDelete := nodeDeleteCandidates.Items[0] @@ -54,8 +54,8 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { } newNodes, err := framework.CheckNodesReady(c, len(origNodes.Items)-1, 5*time.Minute) - Expect(err).To(BeNil()) - Expect(len(newNodes)).To(Equal(len(origNodes.Items) - 1)) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Expect(len(newNodes)).To(gomega.Equal(len(origNodes.Items) - 1)) _, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{}) if err == nil { diff --git a/test/e2e/common/BUILD b/test/e2e/common/BUILD index bcd82581dc..be73bd777a 100644 --- a/test/e2e/common/BUILD +++ b/test/e2e/common/BUILD @@ -43,7 +43,6 @@ go_library( ], importpath = "k8s.io/kubernetes/test/e2e/common", deps = [ - "//pkg/api/v1/node:go_default_library", "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/test/e2e/common/node_lease.go b/test/e2e/common/node_lease.go index c4477df0ae..b3a113b86d 100644 --- a/test/e2e/common/node_lease.go +++ b/test/e2e/common/node_lease.go @@ -24,9 +24,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" - - v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/test/e2e/framework" + testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -123,7 +122,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { // run controller manager, i.e., no node lifecycle controller. 
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) Expect(err).To(BeNil()) - _, readyCondition := v1node.GetNodeCondition(&node.Status, corev1.NodeReady) + _, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady) Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue)) }) }) @@ -136,7 +135,7 @@ func getNextReadyConditionHeartbeatTime(clientSet clientset.Interface, nodeName if err != nil { return err } - _, readyCondition := v1node.GetNodeCondition(&node.Status, corev1.NodeReady) + _, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady) Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue)) newHeartbeatTime = readyCondition.LastHeartbeatTime if prevHeartbeatTime.Before(&newHeartbeatTime) { diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index b420e1e69a..d7cf8d6c34 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -29,11 +29,14 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/test/e2e/framework" @@ -192,8 +195,30 @@ var _ = framework.KubeDescribe("Pods", func() { LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } - w, err := podClient.Watch(options) - Expect(err).NotTo(HaveOccurred(), "failed to set up watch") + + listCompleted := make(chan bool, 1) + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.LabelSelector = selector.String() + podList, err := podClient.List(options) + if err == nil { + select { + case listCompleted <- true: + framework.Logf("observed the pod list") + return podList, err + default: + framework.Logf("channel blocked") + } + } + return podList, err + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = selector.String() + return podClient.Watch(options) + }, + } + _, _, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{}) + defer w.Stop() By("submitting the pod to kubernetes") podClient.Create(pod) @@ -207,12 +232,17 @@ var _ = framework.KubeDescribe("Pods", func() { By("verifying pod creation was observed") select { - case event, _ := <-w.ResultChan(): - if event.Type != watch.Added { - framework.Failf("Failed to observe pod creation: %v", event) + case <-listCompleted: + select { + case event, _ := <-w.ResultChan(): + if event.Type != watch.Added { + framework.Failf("Failed to observe pod creation: %v", event) + } + case <-time.After(framework.PodStartTimeout): + framework.Failf("Timeout while waiting for pod creation") } - case <-time.After(framework.PodStartTimeout): - framework.Failf("Timeout while waiting for pod creation") + case <-time.After(10 * time.Second): + framework.Failf("Timeout while waiting to observe pod list") } // We need to wait for the pod to be running, otherwise the deletion @@ -221,7 +251,6 @@ var _ = framework.KubeDescribe("Pods", func() { // save the running pod pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") - framework.Logf("running pod: %#v", pod) By("deleting the pod 
gracefully") err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30)) diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 0880eb4a73..cbde86d5e2 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -53,28 +53,28 @@ import ( "k8s.io/kubernetes/test/e2e/manifest" testutils "k8s.io/kubernetes/test/utils" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) const ( rsaBits = 2048 validFor = 365 * 24 * time.Hour - // Ingress class annotation defined in ingress repository. + // IngressClassKey is ingress class annotation defined in ingress repository. // TODO: All these annotations should be reused from // ingress-gce/pkg/annotations instead of duplicating them here. IngressClassKey = "kubernetes.io/ingress.class" - // Ingress class annotation value for multi cluster ingress. + // MulticlusterIngressClassValue is ingress class annotation value for multi cluster ingress. MulticlusterIngressClassValue = "gce-multi-cluster" - // Static IP annotation defined in ingress repository. + // IngressStaticIPKey is static IP annotation defined in ingress repository. IngressStaticIPKey = "kubernetes.io/ingress.global-static-ip-name" - // Allow HTTP annotation defined in ingress repository. + // IngressAllowHTTPKey is Allow HTTP annotation defined in ingress repository. IngressAllowHTTPKey = "kubernetes.io/ingress.allow-http" - // Pre-shared-cert annotation defined in ingress repository. + // IngressPreSharedCertKey is Pre-shared-cert annotation defined in ingress repository. IngressPreSharedCertKey = "ingress.gcp.kubernetes.io/pre-shared-cert" // ServiceApplicationProtocolKey annotation defined in ingress repository. @@ -95,45 +95,52 @@ const ( // General cloud resource poll timeout (eg: create static ip, firewall etc) cloudResourcePollTimeout = 5 * time.Minute - NEGAnnotation = "cloud.google.com/neg" + // NEGAnnotation is NEG annotation. + NEGAnnotation = "cloud.google.com/neg" + + // NEGStatusAnnotation is NEG status annotation. NEGStatusAnnotation = "cloud.google.com/neg-status" - NEGUpdateTimeout = 2 * time.Minute - InstanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups" - - // Prefix for annotation keys used by the ingress controller to specify the + // StatusPrefix is prefix for annotation keys used by the ingress controller to specify the // names of GCP resources such as forwarding rules, url maps, target proxies, etc // that it created for the corresponding ingress. StatusPrefix = "ingress.kubernetes.io" ) +// TestLogger is an interface for log. type TestLogger interface { Infof(format string, args ...interface{}) Errorf(format string, args ...interface{}) } +// GLogger is test logger. type GLogger struct{} +// Infof outputs log with info level. func (l *GLogger) Infof(format string, args ...interface{}) { klog.Infof(format, args...) } +// Errorf outputs log with error level. func (l *GLogger) Errorf(format string, args ...interface{}) { klog.Errorf(format, args...) } +// E2ELogger is test logger. type E2ELogger struct{} +// Infof outputs log. func (l *E2ELogger) Infof(format string, args ...interface{}) { framework.Logf(format, args...) } +// Errorf outputs log. func (l *E2ELogger) Errorf(format string, args ...interface{}) { framework.Logf(format, args...) } -// IngressConformanceTests contains a closure with an entry and exit log line. -type IngressConformanceTests struct { +// ConformanceTests contains a closure with an entry and exit log line. 
+type ConformanceTests struct { EntryLog string Execute func() ExitLog string @@ -152,7 +159,7 @@ type NegStatus struct { // CreateIngressComformanceTests generates an slice of sequential test cases: // a simple http ingress, ingress with HTTPS, ingress HTTPS with a modified hostname, // ingress https with a modified URLMap -func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations map[string]string) []IngressConformanceTests { +func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[string]string) []ConformanceTests { manifestPath := filepath.Join(IngressManifestPath, "http") // These constants match the manifests used in IngressManifestPath tlsHost := "foo.bar.com" @@ -161,7 +168,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m updateURLMapHost := "bar.baz.com" updateURLMapPath := "/testurl" // Platform agnostic list of tests that must be satisfied by all controllers - tests := []IngressConformanceTests{ + tests := []ConformanceTests{ { fmt.Sprintf("should create a basic HTTP ingress"), func() { jig.CreateIngress(manifestPath, ns, annotations, annotations) }, @@ -201,7 +208,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m } ing.Spec.Rules = newRules }) - By("Checking that " + pathToFail + " is not exposed by polling for failure") + ginkgo.By("Checking that " + pathToFail + " is not exposed by polling for failure") route := fmt.Sprintf("http://%v%v", jig.Address, pathToFail) framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true)) }, @@ -210,7 +217,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m } // Skip the Update TLS cert test for kubemci: https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/141. if jig.Class != MulticlusterIngressClassValue { - tests = append(tests, IngressConformanceTests{ + tests = append(tests, ConformanceTests{ fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost), func() { jig.Update(func(ing *extensions.Ingress) { @@ -351,8 +358,8 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin return host, cert, key, err } -// IngressTestJig holds the relevant state and parameters of the ingress test. -type IngressTestJig struct { +// TestJig holds the relevant state and parameters of the ingress test. 
+type TestJig struct { Client clientset.Interface Logger TestLogger @@ -369,8 +376,8 @@ type IngressTestJig struct { } // NewIngressTestJig instantiates struct with client -func NewIngressTestJig(c clientset.Interface) *IngressTestJig { - return &IngressTestJig{ +func NewIngressTestJig(c clientset.Interface) *TestJig { + return &TestJig{ Client: c, RootCAs: map[string][]byte{}, PollInterval: framework.LoadBalancerPollInterval, @@ -383,13 +390,13 @@ func NewIngressTestJig(c clientset.Interface) *IngressTestJig { // Optional: secret.yaml, ingAnnotations // If ingAnnotations is specified it will overwrite any annotations in ing.yaml // If svcAnnotations is specified it will overwrite any annotations in svc.yaml -func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) { +func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) { var err error read := func(file string) string { - return string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), Fail)) + return string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), ginkgo.Fail)) } exists := func(file string) bool { - return testfiles.Exists(filepath.Join(manifestPath, file), Fail) + return testfiles.Exists(filepath.Join(manifestPath, file), ginkgo.Fail) } j.Logger.Infof("creating replication controller") @@ -426,7 +433,7 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m } // runCreate runs the required command to create the given ingress. -func (j *IngressTestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress, error) { +func (j *TestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress, error) { if j.Class != MulticlusterIngressClassValue { return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(ing) } @@ -440,7 +447,7 @@ func (j *IngressTestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress } // runUpdate runs the required command to update the given ingress. -func (j *IngressTestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress, error) { +func (j *TestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress, error) { if j.Class != MulticlusterIngressClassValue { return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Update(ing) } @@ -455,7 +462,7 @@ func (j *IngressTestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress } // Update retrieves the ingress, performs the passed function, and then updates it. -func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) { +func (j *TestJig) Update(update func(ing *extensions.Ingress)) { var err error ns, name := j.Ingress.Namespace, j.Ingress.Name for i := 0; i < 3; i++ { @@ -477,7 +484,7 @@ func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) { } // AddHTTPS updates the ingress to add this secret for these hosts. -func (j *IngressTestJig) AddHTTPS(secretName string, hosts ...string) { +func (j *TestJig) AddHTTPS(secretName string, hosts ...string) { // TODO: Just create the secret in GetRootCAs once we're watching secrets in // the ingress controller. _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) @@ -490,7 +497,7 @@ func (j *IngressTestJig) AddHTTPS(secretName string, hosts ...string) { } // SetHTTPS updates the ingress to use only this secret for these hosts. 
-func (j *IngressTestJig) SetHTTPS(secretName string, hosts ...string) { +func (j *TestJig) SetHTTPS(secretName string, hosts ...string) { _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) framework.ExpectNoError(err) j.Logger.Infof("Updating ingress %v to only use secret %v for TLS termination", j.Ingress.Name, secretName) @@ -502,7 +509,7 @@ func (j *IngressTestJig) SetHTTPS(secretName string, hosts ...string) { // RemoveHTTPS updates the ingress to not use this secret for TLS. // Note: Does not delete the secret. -func (j *IngressTestJig) RemoveHTTPS(secretName string) { +func (j *TestJig) RemoveHTTPS(secretName string) { newTLS := []extensions.IngressTLS{} for _, ingressTLS := range j.Ingress.Spec.TLS { if secretName != ingressTLS.SecretName { @@ -517,7 +524,7 @@ func (j *IngressTestJig) RemoveHTTPS(secretName string) { } // PrepareTLSSecret creates a TLS secret and caches the cert. -func (j *IngressTestJig) PrepareTLSSecret(namespace, secretName string, hosts ...string) error { +func (j *TestJig) PrepareTLSSecret(namespace, secretName string, hosts ...string) error { _, cert, _, err := createTLSSecret(j.Client, namespace, secretName, hosts...) if err != nil { return err @@ -527,7 +534,7 @@ func (j *IngressTestJig) PrepareTLSSecret(namespace, secretName string, hosts .. } // GetRootCA returns a rootCA from the ingress test jig. -func (j *IngressTestJig) GetRootCA(secretName string) (rootCA []byte) { +func (j *TestJig) GetRootCA(secretName string) (rootCA []byte) { var ok bool rootCA, ok = j.RootCAs[secretName] if !ok { @@ -537,25 +544,18 @@ func (j *IngressTestJig) GetRootCA(secretName string) (rootCA []byte) { } // TryDeleteIngress attempts to delete the ingress resource and logs errors if they occur. -func (j *IngressTestJig) TryDeleteIngress() { - j.TryDeleteGivenIngress(j.Ingress) +func (j *TestJig) TryDeleteIngress() { + j.tryDeleteGivenIngress(j.Ingress) } -func (j *IngressTestJig) TryDeleteGivenIngress(ing *extensions.Ingress) { +func (j *TestJig) tryDeleteGivenIngress(ing *extensions.Ingress) { if err := j.runDelete(ing); err != nil { j.Logger.Infof("Error while deleting the ingress %v/%v with class %s: %v", ing.Namespace, ing.Name, j.Class, err) } } -func (j *IngressTestJig) TryDeleteGivenService(svc *v1.Service) { - err := j.Client.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) - if err != nil { - j.Logger.Infof("Error while deleting the service %v/%v: %v", svc.Namespace, svc.Name, err) - } -} - // runDelete runs the required command to delete the given ingress. -func (j *IngressTestJig) runDelete(ing *extensions.Ingress) error { +func (j *TestJig) runDelete(ing *extensions.Ingress) error { if j.Class != MulticlusterIngressClassValue { return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil) } @@ -614,7 +614,7 @@ func getIngressAddress(client clientset.Interface, ns, name, class string) ([]st } // WaitForIngressAddress waits for the Ingress to acquire an address. 
-func (j *IngressTestJig) WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) { +func (j *TestJig) WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) { var address string err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) { ipOrNameList, err := getIngressAddress(c, ns, ingName, j.Class) @@ -632,7 +632,7 @@ func (j *IngressTestJig) WaitForIngressAddress(c clientset.Interface, ns, ingNam return address, err } -func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error { +func (j *TestJig) pollIngressWithCert(ing *extensions.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error { // Check that all rules respond to a simple GET. knownHostsSet := sets.NewString(knownHosts...) for _, rules := range ing.Spec.Rules { @@ -666,7 +666,8 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st return nil } -func (j *IngressTestJig) WaitForIngress(waitForNodePort bool) { +// WaitForIngress waits for the Ingress to get an address. +func (j *TestJig) WaitForIngress(waitForNodePort bool) { if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, framework.LoadBalancerPollTimeout); err != nil { framework.Failf("error in waiting for ingress to get an address: %s", err) } @@ -677,7 +678,7 @@ func (j *IngressTestJig) WaitForIngress(waitForNodePort bool) { // http or https). If waitForNodePort is true, the NodePort of the Service // is verified before verifying the Ingress. NodePort is currently a // requirement for cloudprovider Ingress. -func (j *IngressTestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress, waitForNodePort bool, timeout time.Duration) error { +func (j *TestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress, waitForNodePort bool, timeout time.Duration) error { // Wait for the loadbalancer IP. address, err := j.WaitForIngressAddress(j.Client, ing.Namespace, ing.Name, timeout) if err != nil { @@ -693,12 +694,12 @@ func (j *IngressTestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress, return j.pollIngressWithCert(ing, address, knownHosts, cert, waitForNodePort, timeout) } -// WaitForIngress waits till the ingress acquires an IP, then waits for its +// WaitForIngressWithCert waits till the ingress acquires an IP, then waits for its // hosts/urls to respond to a protocol check (either http or https). If // waitForNodePort is true, the NodePort of the Service is verified before // verifying the Ingress. NodePort is currently a requirement for cloudprovider // Ingress. Hostnames and certificate need to be explicitly passed in. -func (j *IngressTestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts []string, cert []byte) error { +func (j *TestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts []string, cert []byte) error { // Wait for the loadbalancer IP. address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout) if err != nil { @@ -710,7 +711,7 @@ func (j *IngressTestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts // VerifyURL polls for the given iterations, in intervals, and fails if the // given url returns a non-healthy http code even once. 
-func (j *IngressTestJig) VerifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error { +func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error { for i := 0; i < iterations; i++ { b, err := framework.SimpleGET(httpClient, route, host) if err != nil { @@ -723,7 +724,7 @@ func (j *IngressTestJig) VerifyURL(route, host string, iterations int, interval return nil } -func (j *IngressTestJig) pollServiceNodePort(ns, name string, port int) error { +func (j *TestJig) pollServiceNodePort(ns, name string, port int) error { // TODO: Curl all nodes? u, err := framework.GetNodePortURL(j.Client, ns, name, port) if err != nil { @@ -735,7 +736,7 @@ func (j *IngressTestJig) pollServiceNodePort(ns, name string, port int) error { // GetIngressNodePorts returns related backend services' nodePorts. // Current GCE ingress controller allows traffic to the default HTTP backend // by default, so retrieve its nodePort if includeDefaultBackend is true. -func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []string { +func (j *TestJig) GetIngressNodePorts(includeDefaultBackend bool) []string { nodePorts := []string{} svcPorts := j.GetServicePorts(includeDefaultBackend) for _, svcPort := range svcPorts { @@ -747,7 +748,7 @@ func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []strin // GetServicePorts returns related backend services' svcPorts. // Current GCE ingress controller allows traffic to the default HTTP backend // by default, so retrieve its nodePort if includeDefaultBackend is true. -func (j *IngressTestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.ServicePort { +func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.ServicePort { svcPorts := make(map[string]v1.ServicePort) if includeDefaultBackend { defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{}) @@ -773,7 +774,7 @@ func (j *IngressTestJig) GetServicePorts(includeDefaultBackend bool) map[string] } // ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource -func (j *IngressTestJig) ConstructFirewallForIngress(firewallRuleName string, nodeTags []string) *compute.Firewall { +func (j *TestJig) ConstructFirewallForIngress(firewallRuleName string, nodeTags []string) *compute.Firewall { nodePorts := j.GetIngressNodePorts(true) fw := compute.Firewall{} @@ -790,7 +791,7 @@ func (j *IngressTestJig) ConstructFirewallForIngress(firewallRuleName string, no } // GetDistinctResponseFromIngress tries GET call to the ingress VIP and return all distinct responses. -func (j *IngressTestJig) GetDistinctResponseFromIngress() (sets.String, error) { +func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) { // Wait for the loadbalancer IP. 
address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout) if err != nil { @@ -823,7 +824,7 @@ type NginxIngressController struct { // Init initializes the NginxIngressController func (cont *NginxIngressController) Init() { read := func(file string) string { - return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file), Fail)) + return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file), ginkgo.Fail)) } framework.Logf("initializing nginx ingress controller") framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) @@ -921,7 +922,7 @@ func generateBacksideHTTPSDeploymentSpec() *apps.Deployment { } // SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured. -func (j *IngressTestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *extensions.Ingress, error) { +func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *extensions.Ingress, error) { deployCreated, err := cs.AppsV1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec()) if err != nil { return nil, nil, nil, err @@ -945,7 +946,7 @@ func (j *IngressTestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, names } // DeleteTestResource deletes given deployment, service and ingress. -func (j *IngressTestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *extensions.Ingress) []error { +func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *extensions.Ingress) []error { var errs []error if ing != nil { if err := j.runDelete(ing); err != nil { diff --git a/test/e2e/framework/podlogs/podlogs.go b/test/e2e/framework/podlogs/podlogs.go index cf24571d96..90911e885f 100644 --- a/test/e2e/framework/podlogs/podlogs.go +++ b/test/e2e/framework/podlogs/podlogs.go @@ -27,7 +27,6 @@ import ( "bytes" "context" "fmt" - "github.com/pkg/errors" "io" "os" "path" @@ -35,6 +34,8 @@ import ( "strings" "sync" + "github.com/pkg/errors" + "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -46,7 +47,7 @@ import ( // rpc error: code = Unknown desc = Error: No such container: 41a... // when the pod gets deleted while streaming. func LogsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) { - req := cs.Core().Pods(ns).GetLogs(pod, opts) + req := cs.CoreV1().Pods(ns).GetLogs(pod, opts) return req.Context(ctx).Stream() } @@ -78,7 +79,7 @@ var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|w // running pods, but that then would have the disadvantage that // already deleted pods aren't covered. 
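// A sketch of the intended wiring from a suite's setup code (the LogWriter
// field is assumed to exist on LogOutput alongside the StatusWriter used
// below):
//
//   ctx, cancel := context.WithCancel(context.Background())
//   defer cancel()
//   to := podlogs.LogOutput{StatusWriter: os.Stdout, LogWriter: os.Stdout}
//   if err := podlogs.CopyAllLogs(ctx, f.ClientSet, ns, to); err != nil {
//       framework.Logf("copying pod logs: %v", err)
//   }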
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error { - watcher, err := cs.Core().Pods(ns).Watch(meta.ListOptions{}) + watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{}) if err != nil { return errors.Wrap(err, "cannot create Pod event watcher") } @@ -90,7 +91,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO m.Lock() defer m.Unlock() - pods, err := cs.Core().Pods(ns).List(meta.ListOptions{}) + pods, err := cs.CoreV1().Pods(ns).List(meta.ListOptions{}) if err != nil { if to.StatusWriter != nil { fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err) @@ -213,7 +214,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO // WatchPods prints pod status events for a certain namespace or all namespaces // when namespace name is empty. func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error { - watcher, err := cs.Core().Pods(ns).Watch(meta.ListOptions{}) + watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{}) if err != nil { return errors.Wrap(err, "cannot create Pod event watcher") } diff --git a/test/e2e/kubectl/framework.go b/test/e2e/kubectl/framework.go index 79e69f9e89..a95ea5a719 100644 --- a/test/e2e/kubectl/framework.go +++ b/test/e2e/kubectl/framework.go @@ -18,6 +18,7 @@ package kubectl import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-cli] "+text, body) } diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 4c8320e951..59c617c628 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -63,8 +63,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" uexec "k8s.io/utils/exec" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -101,14 +101,14 @@ var ( var ( proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)") - CronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"} - CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"} + cronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"} + cronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"} ) // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped. // Aware of the kubectl example files map. 
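// Typical call site, mirroring the tests below: create resources from a
// manifest, then defer cleanup with the same YAML and its selector, e.g.
//
//   framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
//   defer cleanupKubectlInputs(podYaml, ns, simplePodSelector)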
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) { - By("using delete to clean up resources") + ginkgo.By("using delete to clean up resources") var nsArg string if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) @@ -120,7 +120,7 @@ func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) { } func readTestFileOrDie(file string) []byte { - return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file), Fail) + return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file), ginkgo.Fail) } func runKubectlRetryOrDie(args ...string) string { @@ -136,18 +136,18 @@ func runKubectlRetryOrDie(args ...string) string { // Expect no errors to be present after retries are finished // Copied from framework #ExecOrDie framework.Logf("stdout: %q", output) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return output } // duplicated setup to avoid polluting "normal" clients with alpha features which confuses the generated clients var _ = SIGDescribe("Kubectl alpha client", func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() f := framework.NewDefaultFramework("kubectl") var c clientset.Interface var ns string - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) @@ -156,22 +156,22 @@ var _ = SIGDescribe("Kubectl alpha client", func() { var nsFlag string var cjName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) cjName = "e2e-test-echo-cronjob-alpha" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag) }) - It("should create a CronJob", func() { - framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceAlpha, f.Namespace.Name) + ginkgo.It("should create a CronJob", func() { + framework.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceAlpha, f.Namespace.Name) schedule := "*/5 * * * ?" framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1", "--schedule="+schedule, "--image="+busyboxImage, nsFlag) - By("verifying the CronJob " + cjName + " was created") + ginkgo.By("verifying the CronJob " + cjName + " was created") sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting CronJob %s: %v", cjName, err) @@ -191,7 +191,7 @@ var _ = SIGDescribe("Kubectl alpha client", func() { }) var _ = SIGDescribe("Kubectl client", func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() f := framework.NewDefaultFramework("kubectl") // Reusable cluster state function. This won't be adversely affected by lazy initialization of framework. 
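// The mechanical rewrite running through the rest of this file: the former
// dot-imports of ginkgo and gomega are gone, so every DSL call now names its
// package. In sketch form:
//
//   // before (dot-imports):     It("...", func() { Expect(err).NotTo(HaveOccurred()) })
//   // after (qualified):        ginkgo.It("...", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) })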
@@ -208,7 +208,7 @@ var _ = SIGDescribe("Kubectl client", func() { } var c clientset.Interface var ns string - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) @@ -260,10 +260,10 @@ var _ = SIGDescribe("Kubectl client", func() { framework.KubeDescribe("Update Demo", func() { var nautilus, kitten string - BeforeEach(func() { + ginkgo.BeforeEach(func() { updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo" - nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"), Fail))) - kitten = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in"), Fail))) + nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"), ginkgo.Fail))) + kitten = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in"), ginkgo.Fail))) }) /* Release : v1.9 @@ -273,7 +273,7 @@ var _ = SIGDescribe("Kubectl client", func() { framework.ConformanceIt("should create and stop a replication controller ", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) - By("creating a replication controller") + ginkgo.By("creating a replication controller") framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) @@ -286,14 +286,14 @@ var _ = SIGDescribe("Kubectl client", func() { framework.ConformanceIt("should scale a replication controller ", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) - By("creating a replication controller") + ginkgo.By("creating a replication controller") framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) - By("scaling down the replication controller") + ginkgo.By("scaling down the replication controller") debugDiscovery() framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) - By("scaling up the replication controller") + ginkgo.By("scaling up the replication controller") debugDiscovery() framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) @@ -305,10 +305,10 @@ var _ = SIGDescribe("Kubectl client", func() { Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Run a rolling update to run a different version of the container. All running instances SHOULD now be running the newer version of the container as part of the rolling update. 
*/ framework.ConformanceIt("should do a rolling update of a replication controller ", func() { - By("creating the initial replication controller") + ginkgo.By("creating the initial replication controller") framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) - By("rolling-update to new replication controller") + ginkgo.By("rolling-update to new replication controller") debugDiscovery() framework.RunKubectlOrDieInput(string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns) @@ -327,7 +327,7 @@ var _ = SIGDescribe("Kubectl client", func() { "redis-master-deployment.yaml.in", "redis-slave-deployment.yaml.in", } { - contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile), Fail))) + contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile), ginkgo.Fail))) run(contents) } } @@ -341,45 +341,45 @@ var _ = SIGDescribe("Kubectl client", func() { defer forEachGBFile(func(contents string) { cleanupKubectlInputs(contents, ns) }) - By("creating all guestbook components") + ginkgo.By("creating all guestbook components") forEachGBFile(func(contents string) { framework.Logf(contents) framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) }) - By("validating guestbook app") + ginkgo.By("validating guestbook app") validateGuestbookApp(c, ns) }) }) framework.KubeDescribe("Simple pod", func() { var podYaml string - BeforeEach(func() { - By(fmt.Sprintf("creating the pod from %v", podYaml)) + ginkgo.BeforeEach(func() { + ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) - Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(BeTrue()) + gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, simplePodSelector) }) - It("should support exec", func() { - By("executing a command in the container") + ginkgo.It("should support exec", func() { + ginkgo.By("executing a command in the container") execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { framework.Failf("Unexpected kubectl exec output. 
Wanted %q, got %q", e, a) } - By("executing a very long command in the container") + ginkgo.By("executing a very long command in the container") veryLongData := make([]rune, 20000) for i := 0; i < len(veryLongData); i++ { veryLongData[i] = 'a' } execOutput = framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", string(veryLongData)) - Expect(string(veryLongData)).To(Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output") + gomega.Expect(string(veryLongData)).To(gomega.Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output") - By("executing a command in the container with noninteractive stdin") + ginkgo.By("executing a command in the container with noninteractive stdin") execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat"). WithStdinData("abcd1234"). ExecOrDie() @@ -395,7 +395,7 @@ var _ = SIGDescribe("Kubectl client", func() { // NOTE this is solely for test cleanup! defer closer.Close() - By("executing a command in the container with pseudo-interactive stdin") + ginkgo.By("executing a command in the container with pseudo-interactive stdin") execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "sh"). WithStdinReader(r). ExecOrDie() @@ -404,20 +404,20 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should support exec through an HTTP proxy", func() { + ginkgo.It("should support exec through an HTTP proxy", func() { // Fail if the variable isn't set if framework.TestContext.Host == "" { framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") } - By("Starting goproxy") + ginkgo.By("Starting goproxy") testSrv, proxyLogs := startLocalProxy() defer testSrv.Close() proxyAddr := testSrv.URL for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} { proxyLogs.Reset() - By("Running kubectl via an HTTP proxy using " + proxyVar) + ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar) output := framework.NewKubectlCommand(fmt.Sprintf("--namespace=%s", ns), "exec", "nginx", "echo", "running", "in", "container"). WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))). 
ExecOrDie() @@ -438,20 +438,20 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should support exec through kubectl proxy", func() { + ginkgo.It("should support exec through kubectl proxy", func() { // Fail if the variable isn't set if framework.TestContext.Host == "" { framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") } - By("Starting kubectl proxy") + ginkgo.By("Starting kubectl proxy") port, proxyCmd, err := startProxyServer() framework.ExpectNoError(err) defer framework.TryKill(proxyCmd) //proxyLogs.Reset() host := fmt.Sprintf("--server=http://127.0.0.1:%d", port) - By("Running kubectl via kubectl proxy using " + host) + ginkgo.By("Running kubectl via kubectl proxy using " + host) output := framework.NewKubectlCommand( host, fmt.Sprintf("--namespace=%s", ns), "exec", "nginx", "echo", "running", "in", "container", @@ -464,60 +464,60 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should return command exit codes", func() { + ginkgo.It("should return command exit codes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("execing into a container with a successful command") + ginkgo.By("execing into a container with a successful command") _, err := framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) - By("execing into a container with a failing command") + ginkgo.By("execing into a container with a failing command") _, err = framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok := err.(uexec.ExitError) - Expect(ok).To(Equal(true)) - Expect(ee.ExitStatus()).To(Equal(42)) + gomega.Expect(ok).To(gomega.Equal(true)) + gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42)) - By("running a successful command") + ginkgo.By("running a successful command") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) - By("running a failing command") + ginkgo.By("running a failing command") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok = err.(uexec.ExitError) - Expect(ok).To(Equal(true)) - Expect(ee.ExitStatus()).To(Equal(42)) + gomega.Expect(ok).To(gomega.Equal(true)) + gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42)) - By("running a failing command without --restart=Never") + ginkgo.By("running a failing command without --restart=Never") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() framework.ExpectNoError(err) - By("running a failing command without --restart=Never, but with --rm") + ginkgo.By("running a failing command without --restart=Never, but with --rm") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). 
Exec() framework.ExpectNoError(err) framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) - By("running a failing command with --leave-stdin-open") + ginkgo.By("running a failing command with --leave-stdin-open") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). WithStdinData("abcd1234"). Exec() framework.ExpectNoError(err) }) - It("should support inline execution and attach", func() { + ginkgo.It("should support inline execution and attach", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("executing a command with run and attach with stdin") + ginkgo.By("executing a command with run and attach with stdin") runOutput := framework.NewKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). ExecOrDie() g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test", 1*time.Minute, g) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) // NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have // to loop test. err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { @@ -525,30 +525,30 @@ var _ = SIGDescribe("Kubectl client", func() { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) - Expect(runOutput).To(ContainSubstring("abcd1234")) - Expect(runOutput).To(ContainSubstring("stdin closed")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) return strings.Contains(logOutput, "abcd1234"), nil }) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) - Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(BeNil()) + gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(gomega.BeNil()) - By("executing a command with run and attach without stdin") + ginkgo.By("executing a command with run and attach without stdin") runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). ExecOrDie() - Expect(runOutput).ToNot(ContainSubstring("abcd1234")) - Expect(runOutput).To(ContainSubstring("stdin closed")) - Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(BeNil()) + gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) + gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(gomega.BeNil()) - By("executing a command with run and attach with stdin with open stdin should remain running") + ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running") runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234\n"). 
ExecOrDie() - Expect(runOutput).ToNot(ContainSubstring("stdin closed")) + gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed")) g = func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } runTestPod, _, err = polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } @@ -560,20 +560,20 @@ var _ = SIGDescribe("Kubectl client", func() { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) - Expect(logOutput).ToNot(ContainSubstring("stdin closed")) + gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed")) return strings.Contains(logOutput, "abcd1234"), nil }) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) - Expect(c.BatchV1().Jobs(ns).Delete("run-test-3", nil)).To(BeNil()) + gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test-3", nil)).To(gomega.BeNil()) }) - It("should support port-forward", func() { - By("forwarding the container port to a local port") + ginkgo.It("should support port-forward", func() { + ginkgo.By("forwarding the container port to a local port") cmd := runPortForward(ns, simplePodName, simplePodPort) defer cmd.Stop() - By("curling local port output") + ginkgo.By("curling local port output") localAddr := fmt.Sprintf("http://localhost:%d", cmd.port) body, err := curl(localAddr) framework.Logf("got: %s", body) @@ -585,8 +585,8 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should handle in-cluster config", func() { - By("adding rbac permissions") + ginkgo.It("should handle in-cluster config", func() { + ginkgo.By("adding rbac permissions") // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "view", f.Namespace.Name, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) @@ -596,7 +596,7 @@ var _ = SIGDescribe("Kubectl client", func() { f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) - By("overriding icc with values provided by flags") + ginkgo.By("overriding icc with values provided by flags") kubectlPath := framework.TestContext.KubectlPath // we need the actual kubectl binary, not the script wrapper kubectlPathNormalizer := exec.Command("which", kubectlPath) @@ -660,52 +660,52 @@ metadata: framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") - By("getting pods with in-cluster configs") + ginkgo.By("getting pods with in-cluster configs") execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1") - Expect(execOutput).To(MatchRegexp("nginx +1/1 +Running")) - Expect(execOutput).To(ContainSubstring("Using in-cluster namespace")) - Expect(execOutput).To(ContainSubstring("Using in-cluster configuration")) + gomega.Expect(execOutput).To(gomega.MatchRegexp("nginx +1/1 +Running")) + gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster 
namespace")) + gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration")) - By("creating an object containing a namespace with in-cluster config") + ginkgo.By("creating an object containing a namespace with in-cluster config") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1") - Expect(err).To(ContainSubstring("Using in-cluster namespace")) - Expect(err).To(ContainSubstring("Using in-cluster configuration")) - Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort))) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort))) - By("creating an object not containing a namespace with in-cluster config") + ginkgo.By("creating an object not containing a namespace with in-cluster config") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1") - Expect(err).To(ContainSubstring("Using in-cluster namespace")) - Expect(err).To(ContainSubstring("Using in-cluster configuration")) - Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name))) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name))) - By("trying to use kubectl with invalid token") + ginkgo.By("trying to use kubectl with invalid token") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1") framework.Logf("got err %v", err) - Expect(err).To(HaveOccurred()) - Expect(err).To(ContainSubstring("Using in-cluster namespace")) - Expect(err).To(ContainSubstring("Using in-cluster configuration")) - Expect(err).To(ContainSubstring("Authorization: Bearer invalid")) - Expect(err).To(ContainSubstring("Response Status: 401 Unauthorized")) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(err).To(gomega.ContainSubstring("Authorization: Bearer invalid")) + gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized")) - By("trying to use kubectl with invalid server") + ginkgo.By("trying to use kubectl with invalid server") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1") framework.Logf("got err %v", err) - Expect(err).To(HaveOccurred()) - Expect(err).To(ContainSubstring("Unable to connect to the server")) - Expect(err).To(ContainSubstring("GET http://invalid/api")) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server")) + gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api")) - By("trying to use kubectl with invalid namespace") + ginkgo.By("trying to use kubectl with invalid namespace") 
execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1") - Expect(execOutput).To(ContainSubstring("No resources found")) - Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace")) - Expect(execOutput).To(ContainSubstring("Using in-cluster configuration")) - Expect(execOutput).To(MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort))) + gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found")) + gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort))) - By("trying to use kubectl with kubeconfig") + ginkgo.By("trying to use kubectl with kubeconfig") execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1") - Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace")) - Expect(execOutput).ToNot(ContainSubstring("Using in-cluster configuration")) - Expect(execOutput).To(ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods")) + gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods")) }) }) @@ -716,7 +716,7 @@ metadata: Description: Run kubectl to get api versions, output MUST contain returned versions with ‘v1’ listed. 
*/ framework.ConformanceIt("should check if v1 is in available api versions ", func() { - By("validating api versions") + ginkgo.By("validating api versions") output := framework.RunKubectlOrDie("api-versions") if !strings.Contains(output, "v1") { framework.Failf("No v1 in kubectl api-versions") @@ -724,78 +724,91 @@ metadata: }) }) + framework.KubeDescribe("Kubectl get componentstatuses", func() { + ginkgo.It("should get componentstatuses", func() { + ginkgo.By("getting list of componentstatuses") + output := framework.RunKubectlOrDie("get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}") + components := strings.Split(output, " ") + ginkgo.By("getting details of componentstatuses") + for _, component := range components { + ginkgo.By("getting status of " + component) + framework.RunKubectlOrDie("get", "componentstatuses", component) + } + }) + }) + framework.KubeDescribe("Kubectl apply", func() { - It("should apply a new configuration to an existing RC", func() { - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + ginkgo.It("should apply a new configuration to an existing RC", func() { + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("creating Redis RC") - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) - By("applying a modified configuration") - stdin := modifyReplicationControllerConfiguration(controllerJson) + ginkgo.By("creating Redis RC") + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + ginkgo.By("applying a modified configuration") + stdin := modifyReplicationControllerConfiguration(controllerJSON) framework.NewKubectlCommand("apply", "-f", "-", nsFlag). WithStdinReader(stdin). 
ExecOrDie() - By("checking the result") + ginkgo.By("checking the result") forEachReplicationController(c, ns, "app", "redis", validateReplicationControllerConfiguration) }) - It("should reuse port when apply to an existing SVC", func() { - serviceJson := readTestFileOrDie(redisServiceFilename) + ginkgo.It("should reuse port when apply to an existing SVC", func() { + serviceJSON := readTestFileOrDie(redisServiceFilename) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("creating Redis SVC") - framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag) + ginkgo.By("creating Redis SVC") + framework.RunKubectlOrDieInput(string(serviceJSON[:]), "create", "-f", "-", nsFlag) - By("getting the original port") + ginkgo.By("getting the original port") originalNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") - By("applying the same configuration") - framework.RunKubectlOrDieInput(string(serviceJson[:]), "apply", "-f", "-", nsFlag) + ginkgo.By("applying the same configuration") + framework.RunKubectlOrDieInput(string(serviceJSON[:]), "apply", "-f", "-", nsFlag) - By("getting the port after applying configuration") + ginkgo.By("getting the port after applying configuration") currentNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") - By("checking the result") + ginkgo.By("checking the result") if originalNodePort != currentNodePort { framework.Failf("port should keep the same") } }) - It("apply set/view last-applied", func() { + ginkgo.It("apply set/view last-applied", func() { deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment1Filename))) deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment2Filename))) deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment3Filename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("deployment replicas number is 2") + ginkgo.By("deployment replicas number is 2") framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "-f", "-", nsFlag) - By("check the last-applied matches expectations annotations") + ginkgo.By("check the last-applied matches expectations annotations") output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString := "\"replicas\": 2" if !strings.Contains(output, requiredString) { framework.Failf("Missing %s in kubectl view-last-applied", requiredString) } - By("apply file doesn't have replicas") + ginkgo.By("apply file doesn't have replicas") framework.RunKubectlOrDieInput(deployment2Yaml, "apply", "set-last-applied", "-f", "-", nsFlag) - By("check last-applied has been updated, annotations doesn't have replicas") + ginkgo.By("check last-applied has been updated, annotations doesn't have replicas") output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString = "\"replicas\": 2" if strings.Contains(output, requiredString) { framework.Failf("Presenting %s in kubectl view-last-applied", requiredString) } - By("scale set replicas to 3") + ginkgo.By("scale set replicas to 3") nginxDeploy := "nginx-deployment" debugDiscovery() framework.RunKubectlOrDie("scale", "deployment", nginxDeploy, "--replicas=3", nsFlag) - By("apply file doesn't have replicas but image changed") + ginkgo.By("apply file doesn't have replicas but image changed") 
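// Because deployment3Yaml omits .spec.replicas, and replicas was removed from
// the last-applied annotation by set-last-applied above, this apply must
// leave the scaled-up value (3) untouched and only roll the image forward.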
framework.RunKubectlOrDieInput(deployment3Yaml, "apply", "-f", "-", nsFlag) - By("verify replicas still is 3 and image has been updated") + ginkgo.By("verify replicas still is 3 and image has been updated") output = framework.RunKubectlOrDieInput(deployment3Yaml, "get", "-f", "-", nsFlag, "-o", "json") requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)} for _, item := range requiredItems { @@ -813,7 +826,7 @@ metadata: Description: Call kubectl to get cluster-info; the output MUST contain the returned cluster-info and Kubernetes Master SHOULD be running. */ framework.ConformanceIt("should check if Kubernetes master services is included in cluster-info ", func() { - By("validating cluster-info") + ginkgo.By("validating cluster-info") output := framework.RunKubectlOrDie("cluster-info") // Can't check exact strings due to terminal control commands (colors) requiredItems := []string{"Kubernetes master", "is running at"} @@ -826,8 +839,8 @@ metadata: }) framework.KubeDescribe("Kubectl cluster-info dump", func() { - It("should check if cluster-info dump succeeds", func() { - By("running cluster-info dump") + ginkgo.It("should check if cluster-info dump succeeds", func() { + ginkgo.By("running cluster-info dump") framework.RunKubectlOrDie("cluster-info", "dump") }) }) @@ -840,16 +853,16 @@ metadata: */ framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() { kv, err := framework.KubectlVersion() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.SkipUnlessServerVersionGTE(kv, c.Discovery()) - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) - serviceJson := readTestFileOrDie(redisServiceFilename) + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + serviceJSON := readTestFileOrDie(redisServiceFilename) nsFlag := fmt.Sprintf("--namespace=%v", ns) - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) - framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(string(serviceJSON[:]), "create", "-f", "-", nsFlag) - By("Waiting for Redis master to start.") + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) // Pod @@ -906,7 +919,7 @@ metadata: // Node // It should be OK to list unschedulable Nodes here. nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) node := nodes.Items[0] output = framework.RunKubectlOrDie("describe", "node", node.Name) requiredStrings = [][]string{ @@ -947,18 +960,18 @@ metadata: Description: Create a Pod running redis master listening on port 6379. Using kubectl expose the redis master replication controller at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, the port that redis master listens on. Using kubectl expose the redis master as a service at port 2345. The service MUST be listening on port 2345 and the target port is set to 6379, the port that redis master listens on.
*/ framework.ConformanceIt("should create services for rc ", func() { - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) redisPort := 6379 - By("creating Redis RC") + ginkgo.By("creating Redis RC") framework.Logf("namespace %v", ns) - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) // It may take a while for the pods to get registered in some cases, wait to be sure. - By("Waiting for Redis master to start.") + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) forEachPod(func(pod v1.Pod) { framework.Logf("wait on redis-master startup in %v ", ns) @@ -995,10 +1008,10 @@ metadata: } return true, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(service.Spec.Ports) != 1 { framework.Failf("1 port is expected") @@ -1012,12 +1025,12 @@ metadata: } } - By("exposing RC") + ginkgo.By("exposing RC") framework.RunKubectlOrDie("expose", "rc", "redis-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", redisPort), nsFlag) framework.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm2", 1234, framework.ServiceStartTimeout) - By("exposing service") + ginkgo.By("exposing service") framework.RunKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", redisPort), nsFlag) framework.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm3", 2345, framework.ServiceStartTimeout) @@ -1027,14 +1040,14 @@ metadata: framework.KubeDescribe("Kubectl label", func() { var podYaml string var nsFlag string - BeforeEach(func() { - By("creating the pod") + ginkgo.BeforeEach(func() { + ginkgo.By("creating the pod") podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(BeTrue()) + gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, pausePodSelector) }) @@ -1047,17 +1060,17 @@ metadata: labelName := "testing-label" labelValue := "testing-label-value" - By("adding the label " + labelName + " with value " + labelValue + " to a pod") + ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod") framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"="+labelValue, nsFlag) - By("verifying the pod has the label " + labelName + " with the value " + labelValue) + ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) if !strings.Contains(output, labelValue) { framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) } - By("removing the label " + labelName + " 
of a pod") + ginkgo.By("removing the label " + labelName + " of a pod") framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"-", nsFlag) - By("verifying the pod doesn't have the label " + labelName) + ginkgo.By("verifying the pod doesn't have the label " + labelName) output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) if strings.Contains(output, labelValue) { framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) @@ -1068,14 +1081,14 @@ metadata: framework.KubeDescribe("Kubectl copy", func() { var podYaml string var nsFlag string - BeforeEach(func() { - By("creating the pod") + ginkgo.BeforeEach(func() { + ginkgo.By("creating the pod") nsFlag = fmt.Sprintf("--namespace=%v", ns) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(BeTrue()) + gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, busyboxPodSelector) }) @@ -1084,7 +1097,7 @@ metadata: Testname: Kubectl, copy Description: When a Pod is running, copy a known file from it to a temporary local destination. */ - It("should copy a file from a running Pod", func() { + ginkgo.It("should copy a file from a running Pod", func() { remoteContents := "foobar\n" podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName) tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar") @@ -1092,9 +1105,9 @@ metadata: framework.Failf("Failed creating temporary destination file: %v", err) } - By("specifying a remote filepath " + podSource + " on the pod") + ginkgo.By("specifying a remote filepath " + podSource + " on the pod") framework.RunKubectlOrDie("cp", podSource, tempDestination.Name(), nsFlag) - By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) + ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) localData, err := ioutil.ReadAll(tempDestination) if err != nil { framework.Failf("Failed reading temporary local file: %v", err) @@ -1109,13 +1122,13 @@ metadata: var nsFlag string var rc string containerName := "redis-master" - BeforeEach(func() { - By("creating an rc") + ginkgo.BeforeEach(func() { + ginkgo.By("creating an rc") rc = commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(rc, "create", "-f", "-", nsFlag) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(rc, ns, simplePodSelector) }) @@ -1137,45 +1150,45 @@ metadata: return strings.Split(strings.TrimRight(out, "\n"), "\n") } - By("Waiting for Redis master to start.") + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) forEachPod(func(pod v1.Pod) { - By("checking for a matching strings") + ginkgo.By("checking for a matching strings") _, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("limiting log lines") + ginkgo.By("limiting log 
lines") out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1") - Expect(len(out)).NotTo(BeZero()) - Expect(len(lines(out))).To(Equal(1)) + gomega.Expect(len(out)).NotTo(gomega.BeZero()) + gomega.Expect(len(lines(out))).To(gomega.Equal(1)) - By("limiting log bytes") + ginkgo.By("limiting log bytes") out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--limit-bytes=1") - Expect(len(lines(out))).To(Equal(1)) - Expect(len(out)).To(Equal(1)) + gomega.Expect(len(lines(out))).To(gomega.Equal(1)) + gomega.Expect(len(out)).To(gomega.Equal(1)) - By("exposing timestamps") + ginkgo.By("exposing timestamps") out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1", "--timestamps") l := lines(out) - Expect(len(l)).To(Equal(1)) + gomega.Expect(len(l)).To(gomega.Equal(1)) words := strings.Split(l[0], " ") - Expect(len(words)).To(BeNumerically(">", 1)) + gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1)) if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil { if _, err := time.Parse(time.RFC3339, words[0]); err != nil { framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) } } - By("restricting to a time range") + ginkgo.By("restricting to a time range") // Note: we must wait at least two seconds, // because the granularity is only 1 second and // it could end up rounding the wrong way. time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s - recent_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s") - recent := len(strings.Split(recent_out, "\n")) - older_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h") - older := len(strings.Split(older_out, "\n")) - Expect(recent).To(BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recent_out, older_out) + recentOut := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s") + recent := len(strings.Split(recentOut, "\n")) + olderOut := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h") + older := len(strings.Split(olderOut, "\n")) + gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut) }) }) }) @@ -1187,18 +1200,18 @@ metadata: Description: Start running a redis master and a replication controller. When the pod is running, using ‘kubectl patch’ command add annotations. The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of the Pods running under the replication controller. 
*/ framework.ConformanceIt("should add annotations for pods in rc ", func() { - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("creating Redis RC") - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) - By("Waiting for Redis master to start.") + ginkgo.By("creating Redis RC") + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) - By("patching all pods") + ginkgo.By("patching all pods") forEachPod(func(pod v1.Pod) { framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") }) - By("checking annotations") + ginkgo.By("checking annotations") forEachPod(func(pod v1.Pod) { found := false for key, val := range pod.Annotations { @@ -1237,13 +1250,13 @@ metadata: var cleanUp func() - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) name = "e2e-test-nginx-deployment" cleanUp = func() { framework.RunKubectlOrDie("delete", "deployment", name, nsFlag) } }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanUp() }) @@ -1253,9 +1266,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a running pod with possible replicas given a image using the option --image=’nginx’. The running Pod SHOULD have one container and the container SHOULD be running the image specified in the ‘run’ command. */ framework.ConformanceIt("should create an rc or deployment from an image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag) - By("verifying the pod controlled by " + name + " gets created") + ginkgo.By("verifying the pod controlled by " + name + " gets created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { @@ -1273,12 +1286,12 @@ metadata: var nsFlag string var rcName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) rcName = "e2e-test-nginx-rc" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag) }) @@ -1288,9 +1301,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a running rc with default one replicas given a image using the option --image=’nginx’. The running replication controller SHOULD have one container and the container SHOULD be running the image specified in the ‘run’ command. Also there MUST be 1 pod controlled by this replica set running 1 container with the image specified. A ‘kubetctl logs’ command MUST return the logs from the container in the replication controller. 
*/ framework.ConformanceIt("should create an rc from an image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) - By("verifying the rc " + rcName + " was created") + ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) @@ -1300,7 +1313,7 @@ metadata: framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) } - By("verifying the pod controlled by rc " + rcName + " was created") + ginkgo.By("verifying the pod controlled by rc " + rcName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName})) podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { @@ -1312,7 +1325,7 @@ metadata: framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) } - By("confirm that you can get logs from an rc") + ginkgo.By("confirm that you can get logs from an rc") podNames := []string{} for _, pod := range pods { podNames = append(podNames, pod.Name) @@ -1333,13 +1346,13 @@ metadata: var rcName string var c clientset.Interface - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet nsFlag = fmt.Sprintf("--namespace=%v", ns) rcName = "e2e-test-nginx-rc" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag) }) @@ -1349,9 +1362,9 @@ metadata: Description: Command ‘kubectl rolling-update’ MUST replace the specified replication controller with a new replication controller by updating one pod at a time to use the new Pod spec. */ framework.ConformanceIt("should support rolling-update to same image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) - By("verifying the rc " + rcName + " was created") + ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) @@ -1362,7 +1375,7 @@ metadata: } framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout) - By("rolling-update to same image controller") + ginkgo.By("rolling-update to same image controller") debugDiscovery() runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag) @@ -1374,12 +1387,12 @@ metadata: var nsFlag string var dName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) dName = "e2e-test-nginx-deployment" }) - AfterEach(func() { + ginkgo.AfterEach(func() { err := wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { out, err := framework.RunKubectl("delete", "deployment", dName, nsFlag) if err != nil { @@ -1390,7 +1403,7 @@ metadata: } return true, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1399,9 +1412,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a deployment, with --generator=deployment, when a image name is specified in the run command. 
After the run command there SHOULD be a deployment with one container running the specified image. Also there SHOULD be a Pod that is controlled by this deployment, with a container running the specified image. */ framework.ConformanceIt("should create a deployment from an image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag) - By("verifying the deployment " + dName + " was created") + ginkgo.By("verifying the deployment " + dName + " was created") d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting deployment %s: %v", dName, err) @@ -1411,7 +1424,7 @@ metadata: framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage) } - By("verifying the pod controlled by deployment " + dName + " was created") + ginkgo.By("verifying the pod controlled by deployment " + dName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName})) podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { @@ -1429,12 +1442,12 @@ metadata: var nsFlag string var jobName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) jobName = "e2e-test-nginx-job" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "jobs", jobName, nsFlag) }) @@ -1444,9 +1457,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a job, with --generator=job, when an image name is specified in the run command. After the run command there SHOULD be a job with one container running the specified image. Also there SHOULD be a restart policy on the job spec that matches the command line. */ framework.ConformanceIt("should create a job from an image when restart is OnFailure ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag) - By("verifying the job " + jobName + " was created") + ginkgo.By("verifying the job " + jobName + " was created") job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting job %s: %v", jobName, err) @@ -1465,22 +1478,22 @@ metadata: var nsFlag string var cjName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) cjName = "e2e-test-echo-cronjob-beta" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag) }) - It("should create a CronJob", func() { - framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name) + ginkgo.It("should create a CronJob", func() { + framework.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceBeta, f.Namespace.Name) schedule := "*/5 * * * ?"
framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v1beta1", "--schedule="+schedule, "--image="+busyboxImage, nsFlag) - By("verifying the CronJob " + cjName + " was created") + ginkgo.By("verifying the CronJob " + cjName + " was created") cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting CronJob %s: %v", cjName, err) @@ -1502,12 +1515,12 @@ metadata: var nsFlag string var podName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) podName = "e2e-test-nginx-pod" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "pods", podName, nsFlag) }) @@ -1517,9 +1530,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a pod, with --generator=run-pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image. */ framework.ConformanceIt("should create a pod from an image when restart is Never ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag) - By("verifying the pod " + podName + " was created") + ginkgo.By("verifying the pod " + podName + " was created") pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) @@ -1538,12 +1551,12 @@ metadata: var nsFlag string var podName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) podName = "e2e-test-nginx-pod" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "pods", podName, nsFlag) }) @@ -1553,27 +1566,27 @@ metadata: Description: Command ‘kubectl replace’ on a existing Pod with a new spec MUST update the image of the container running in the Pod. A -f option to ‘kubectl replace’ SHOULD force to re-create the resource. The new Pod SHOULD have the container with new change to the image. 
*/ framework.ConformanceIt("should update a single-container pod's image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag) - By("verifying the pod " + podName + " is running") + ginkgo.By("verifying the pod " + podName + " is running") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName})) err := testutils.WaitForPodsWithLabelRunning(c, ns, label) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) } - By("verifying the pod " + podName + " was created") - podJson := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") - if !strings.Contains(podJson, podName) { - framework.Failf("Failed to find pod %s in [%s]", podName, podJson) + ginkgo.By("verifying the pod " + podName + " was created") + podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") + if !strings.Contains(podJSON, podName) { + framework.Failf("Failed to find pod %s in [%s]", podName, podJSON) } - By("replace the image in the pod") - podJson = strings.Replace(podJson, nginxImage, busyboxImage, 1) - framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag) + ginkgo.By("replace the image in the pod") + podJSON = strings.Replace(podJSON, nginxImage, busyboxImage, 1) + framework.RunKubectlOrDieInput(podJSON, "replace", "-f", "-", nsFlag) - By("verifying the pod " + podName + " has the right image " + busyboxImage) + ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting deployment %s: %v", podName, err) @@ -1596,23 +1609,23 @@ metadata: framework.ConformanceIt("should create a job from an image, then delete the job ", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("executing a command with run --rm and attach with stdin") + ginkgo.By("executing a command with run --rm and attach with stdin") t := time.NewTimer(runJobTimeout) defer t.Stop() runOutput := framework.NewKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). WithTimeout(t.C). ExecOrDie() - Expect(runOutput).To(ContainSubstring("abcd1234")) - Expect(runOutput).To(ContainSubstring("stdin closed")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) err := framework.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("verifying the job " + jobName + " was deleted") + ginkgo.By("verifying the job " + jobName + " was deleted") _, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - Expect(err).To(HaveOccurred()) - Expect(apierrs.IsNotFound(err)).To(BeTrue()) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(apierrs.IsNotFound(err)).To(gomega.BeTrue()) }) }) @@ -1624,7 +1637,7 @@ metadata: Description: Start a proxy server on port zero by running ‘kubectl proxy’ with --port=0. Call the proxy server by requesting api versions from unix socket. The proxy server MUST provide at least one version string. 
*/ framework.ConformanceIt("should support proxy with --port 0 ", func() { - By("starting the proxy server") + ginkgo.By("starting the proxy server") port, cmd, err := startProxyServer() if cmd != nil { defer framework.TryKill(cmd) @@ -1632,7 +1645,7 @@ metadata: if err != nil { framework.Failf("Failed to start proxy server: %v", err) } - By("curling proxy /api/ output") + ginkgo.By("curling proxy /api/ output") localAddr := fmt.Sprintf("http://localhost:%d/api/", port) apiVersions, err := getAPIVersions(localAddr) if err != nil { @@ -1649,7 +1662,7 @@ metadata: Description: Start a proxy server on by running ‘kubectl proxy’ with --unix-socket=. Call the proxy server by requesting api versions from http://locahost:0/api. The proxy server MUST provide at least one version string */ framework.ConformanceIt("should support --unix-socket=/path ", func() { - By("Starting the proxy") + ginkgo.By("Starting the proxy") tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix") if err != nil { framework.Failf("Failed to create temporary directory: %v", err) @@ -1669,7 +1682,7 @@ metadata: if _, err = stdout.Read(buf); err != nil { framework.Failf("Expected output from kubectl proxy: %v", err) } - By("retrieving proxy /api/ output") + ginkgo.By("retrieving proxy /api/ output") _, err = curlUnix("http://unused/api", path) if err != nil { framework.Failf("Failed get of /api at %s: %v", path, err) @@ -1680,7 +1693,7 @@ metadata: // This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on // it, which will affect anything else running in parallel. framework.KubeDescribe("Kubectl taint [Serial]", func() { - It("should update the taint on a node", func() { + ginkgo.It("should update the taint on a node", func() { testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -1689,11 +1702,11 @@ metadata: nodeName := scheduling.GetNodeThatCanRunPod(f) - By("adding the taint " + testTaint.ToString() + " to a node") + ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) - By("verifying the node has the taint " + testTaint.ToString()) + ginkgo.By("verifying the node has the taint " + testTaint.ToString()) output := runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings := [][]string{ {"Name:", nodeName}, @@ -1702,16 +1715,16 @@ metadata: } checkOutput(output, requiredStrings) - By("removing the taint " + testTaint.ToString() + " of a node") + ginkgo.By("removing the taint " + testTaint.ToString() + " of a node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-") - By("verifying the node doesn't have the taint " + testTaint.Key) + ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key) output = runKubectlRetryOrDie("describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName) } }) - It("should remove all the taints with the same key off a node", func() { + ginkgo.It("should remove all the taints with the same key off a node", func() { testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -1720,11 +1733,11 @@ metadata: nodeName := scheduling.GetNodeThatCanRunPod(f) - By("adding the taint " + 
testTaint.ToString() + " to a node") + ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) - By("verifying the node has the taint " + testTaint.ToString()) + ginkgo.By("verifying the node has the taint " + testTaint.ToString()) output := runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings := [][]string{ {"Name:", nodeName}, @@ -1738,11 +1751,11 @@ metadata: Value: "another-testing-taint-value", Effect: v1.TaintEffectPreferNoSchedule, } - By("adding another taint " + newTestTaint.ToString() + " to the node") + ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node") runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint) - By("verifying the node has the taint " + newTestTaint.ToString()) + ginkgo.By("verifying the node has the taint " + newTestTaint.ToString()) output = runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings = [][]string{ {"Name:", nodeName}, @@ -1756,11 +1769,11 @@ metadata: Value: "testing-taint-value-no-execute", Effect: v1.TaintEffectNoExecute, } - By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node") + ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node") runKubectlRetryOrDie("taint", "nodes", nodeName, noExecuteTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint) - By("verifying the node has the taint " + noExecuteTaint.ToString()) + ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString()) output = runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings = [][]string{ {"Name:", nodeName}, @@ -1769,9 +1782,9 @@ metadata: } checkOutput(output, requiredStrings) - By("removing all taints that have the same key " + testTaint.Key + " of the node") + ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+"-") - By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) + ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) output = runKubectlRetryOrDie("describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName) @@ -1780,14 +1793,14 @@ metadata: }) framework.KubeDescribe("Kubectl create quota", func() { - It("should create a quota without scopes", func() { + ginkgo.It("should create a quota without scopes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) quotaName := "million" - By("calling kubectl quota") + ginkgo.By("calling kubectl quota") framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) - By("verifying that the quota was created") + ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) @@ -1809,14 +1822,14 @@ metadata: } }) - It("should create a quota with scopes", func() { + ginkgo.It("should create a quota with scopes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) quotaName := "scopes" - By("calling kubectl quota") + ginkgo.By("calling kubectl quota") 
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) - By("verifying that the quota was created") + ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) @@ -1837,11 +1850,11 @@ metadata: } }) - It("should reject quota with invalid scopes", func() { + ginkgo.It("should reject quota with invalid scopes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) quotaName := "scopes" - By("calling kubectl quota") + ginkgo.By("calling kubectl quota") out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag) if err == nil { framework.Failf("Expected kubectl to fail, but it succeeded: %s", out) @@ -1968,7 +1981,7 @@ func validateGuestbookApp(c clientset.Interface, ns string) { framework.Logf("Waiting for all frontend pods to be Running.") label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"})) err := testutils.WaitForPodsWithLabelRunning(c, ns, label) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for frontend to serve content.") if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) { framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds()) @@ -2053,7 +2066,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := metav1.ListOptions{LabelSelector: label.String()} rcs, err = c.CoreV1().ReplicationControllers(ns).List(options) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(rcs.Items) > 0 { break } @@ -2115,9 +2128,8 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected) if strings.Contains(data.Image, jpgExpected) { return nil - } else { - return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected) } + return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected) } } diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index d29847839f..53c764bc7b 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -39,8 +39,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -116,6 +116,7 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string, bi } } +// WaitForTerminatedContainer wait till a given container be terminated for a given pod. 
func WaitForTerminatedContainer(f *framework.Framework, pod *v1.Pod, containerName string) error { return framework.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { if len(testutils.TerminatedContainers(pod)[containerName]) > 0 { @@ -199,7 +200,7 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { } func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { - By("Creating the target pod") + ginkgo.By("Creating the target pod") pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -208,21 +209,21 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { framework.Failf("Pod did not start running: %v", err) } - By("Running 'kubectl port-forward'") + ginkgo.By("Running 'kubectl port-forward'") cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() - By("Dialing the local port") + ginkgo.By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { - By("Closing the connection to the local port") + ginkgo.By("Closing the connection to the local port") conn.Close() }() - By("Reading data from the local port") + ginkgo.By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { framework.Failf("Unexpected error reading data from the server: %v", err) @@ -232,22 +233,22 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { framework.Failf("Expected %q from server, got %q", e, a) } - By("Waiting for the target pod to stop running") + ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Done"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Done"), )) } func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { - By("Creating the target pod") + ginkgo.By("Creating the target pod") pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -256,35 +257,35 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { framework.Failf("Pod did not start running: %v", err) } - By("Running 'kubectl port-forward'") + ginkgo.By("Running 'kubectl port-forward'") cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() - By("Dialing the local port") + ginkgo.By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } - By("Closing the connection to the local port") + 
ginkgo.By("Closing the connection to the local port") conn.Close() - By("Waiting for the target pod to stop running") + ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"), )) } func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) { - By("Creating the target pod") + ginkgo.By("Creating the target pod") pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -293,11 +294,11 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) framework.Failf("Pod did not start running: %v", err) } - By("Running 'kubectl port-forward'") + ginkgo.By("Running 'kubectl port-forward'") cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() - By("Dialing the local port") + ginkgo.By("Dialing the local port") addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { framework.Failf("Error resolving tcp addr: %v", err) @@ -307,17 +308,17 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { - By("Closing the connection to the local port") + ginkgo.By("Closing the connection to the local port") conn.Close() }() - By("Sending the expected data to the local port") + ginkgo.By("Sending the expected data to the local port") fmt.Fprint(conn, "abc") - By("Closing the write half of the client's connection") + ginkgo.By("Closing the write half of the client's connection") conn.CloseWrite() - By("Reading data from the local port") + ginkgo.By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { framework.Failf("Unexpected error reading data from the server: %v", err) @@ -327,26 +328,26 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) framework.Failf("Expected %q from server, got %q", e, a) } - By("Waiting for the target pod to stop running") + ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Received expected client data"), - ContainSubstring("Done"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + 
gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Received expected client data"), + gomega.ContainSubstring("Done"), )) } func doTestOverWebSockets(bindAddress string, f *framework.Framework) { config, err := framework.LoadConfig() - Expect(err).NotTo(HaveOccurred(), "unable to get base config") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config") - By("Creating the pod") + ginkgo.By("Creating the pod") pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -369,7 +370,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { } defer ws.Close() - Eventually(func() error { + gomega.Eventually(func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err) @@ -381,9 +382,9 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { return fmt.Errorf("Received the wrong port: %d", p) } return nil - }, time.Minute, 10*time.Second).Should(BeNil()) + }, time.Minute, 10*time.Second).Should(gomega.BeNil()) - Eventually(func() error { + gomega.Eventually(func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err) @@ -395,18 +396,18 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { return fmt.Errorf("Received the wrong port: %d", p) } return nil - }, time.Minute, 10*time.Second).Should(BeNil()) + }, time.Minute, 10*time.Second).Should(gomega.BeNil()) - By("Sending the expected data to the local port") + ginkgo.By("Sending the expected data to the local port") err = wsWrite(ws, 0, []byte("def")) if err != nil { framework.Failf("Failed to write to websocket %s: %v", url.String(), err) } - By("Reading data from the local port") + ginkgo.By("Reading data from the local port") buf := bytes.Buffer{} expectedData := bytes.Repeat([]byte("x"), 100) - Eventually(func() error { + gomega.Eventually(func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err) @@ -419,14 +420,14 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { return fmt.Errorf("Expected %q from server, got %q", expectedData, buf.Bytes()) } return nil - }, time.Minute, 10*time.Second).Should(BeNil()) + }, time.Minute, 10*time.Second).Should(gomega.BeNil()) - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Received expected client data"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Received expected client data"), )) } @@ -435,21 +436,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { framework.KubeDescribe("With a server listening on 0.0.0.0", func() { framework.KubeDescribe("that expects a client request", func() { - It("should support a client that connects, sends NO DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends NO DATA, and 
disconnects", func() { doTestMustConnectSendNothing("0.0.0.0", f) }) - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestMustConnectSendDisconnect("0.0.0.0", f) }) }) framework.KubeDescribe("that expects NO client request", func() { - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestConnectSendDisconnect("0.0.0.0", f) }) }) - It("should support forwarding over websockets", func() { + ginkgo.It("should support forwarding over websockets", func() { doTestOverWebSockets("0.0.0.0", f) }) }) @@ -457,21 +458,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { // kubectl port-forward may need elevated privileges to do its job. framework.KubeDescribe("With a server listening on localhost", func() { framework.KubeDescribe("that expects a client request", func() { - It("should support a client that connects, sends NO DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func() { doTestMustConnectSendNothing("localhost", f) }) - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestMustConnectSendDisconnect("localhost", f) }) }) framework.KubeDescribe("that expects NO client request", func() { - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestConnectSendDisconnect("localhost", f) }) }) - It("should support forwarding over websockets", func() { + ginkgo.It("should support forwarding over websockets", func() { doTestOverWebSockets("localhost", f) }) }) diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index 0acd30efed..a035afaa7a 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -39,7 +39,7 @@ var _ = SIGDescribe("DNS", func() { /* Release : v1.9 Testname: DNS, cluster - Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via DNS and /etc/hosts. + Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via DNS. */ framework.ConformanceIt("should provide DNS for the cluster ", func() { // All the names we need to be able to resolve. 
@@ -53,10 +53,8 @@ var _ = SIGDescribe("DNS", func() { namesToResolve = append(namesToResolve, "google.com") namesToResolve = append(namesToResolve, "metadata") } - hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - hostEntries := []string{hostFQDN, dnsTestPodHostName} - wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) + wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) + jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") By("Running these commands on jessie: " + jessieProbeCmd + "\n") @@ -91,6 +89,25 @@ var _ = SIGDescribe("DNS", func() { validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) + /* + Release : v1.14 + Testname: DNS, cluster + Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via /etc/hosts. + */ + framework.ConformanceIt("should provide /etc/hosts entries for the cluster [LinuxOnly]", func() { + hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) + hostEntries := []string{hostFQDN, dnsTestPodHostName} + wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) + jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) + By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + By("Running these commands on jessie: " + jessieProbeCmd + "\n") + + // Run a pod which probes /etc/hosts and exposes the results by HTTP. + By("creating a pod to probe /etc/hosts") + pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) + validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + }) + /* Release : v1.9 Testname: DNS, services diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 96815c9dd9..0785e7329b 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -44,12 +44,17 @@ import ( . "github.com/onsi/gomega" ) +const ( + negUpdateTimeout = 2 * time.Minute + instanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups" +) + var _ = SIGDescribe("Loadbalancing: L7", func() { defer GinkgoRecover() var ( ns string - jig *ingress.IngressTestJig - conformanceTests []ingress.IngressConformanceTests + jig *ingress.TestJig + conformanceTests []ingress.ConformanceTests cloudConfig framework.CloudConfig ) f := framework.NewDefaultFramework("ingress") @@ -362,14 +367,14 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) annotations := ing.Annotations - if annotations == nil || annotations[ingress.InstanceGroupAnnotation] == "" { - framework.Logf("Waiting for ingress to get %s annotation. 
Found annotations: %v", ingress.InstanceGroupAnnotation, annotations) + if annotations == nil || annotations[instanceGroupAnnotation] == "" { + framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", instanceGroupAnnotation, annotations) return false, nil } return true, nil }) if pollErr != nil { - framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, ingress.InstanceGroupAnnotation)) + framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation)) } // Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc. @@ -569,7 +574,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) Expect(err).NotTo(HaveOccurred()) } - wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) { + wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() if err != nil { return false, nil @@ -664,7 +669,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) Expect(err).NotTo(HaveOccurred()) } - wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) { + wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -982,7 +987,7 @@ func verifyKubemciStatusHas(name, expectedSubStr string) { } } -func executePresharedCertTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) { +func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { preSharedCertName := "test-pre-shared-cert" By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) testHostname := "test.ingress.com" @@ -1033,7 +1038,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.IngressTestJi Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } -func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.IngressTestJig, ipName, ip string) { +func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) { jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{ ingress.IngressStaticIPKey: ipName, ingress.IngressAllowHTTPKey: "false", @@ -1047,7 +1052,7 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.IngressTe framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) } -func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) { +func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { By("Creating a set of ingress, service and deployment that have backside re-encryption configured") deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName) defer func() { @@ -1079,7 +1084,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.Ingre Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress") } -func detectHttpVersionAndSchemeTest(f *framework.Framework, jig 
*ingress.IngressTestJig, address, version, scheme string) { +func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *ingress.TestJig, address, version, scheme string) { timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} resp := "" err := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { @@ -1102,7 +1107,7 @@ func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *ingress.Ingress Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get %s or %s, response body: %s", version, scheme, resp)) } -func detectNegAnnotation(f *framework.Framework, jig *ingress.IngressTestJig, gceController *gce.GCEIngressController, ns, name string, negs int) { +func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.GCEIngressController, ns, name string, negs int) { wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) if err != nil { diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index a570321f3a..696cc9bb4e 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -62,7 +62,7 @@ var ( // IngressScaleFramework defines the framework for ingress scale testing. type IngressScaleFramework struct { Clientset clientset.Interface - Jig *ingress.IngressTestJig + Jig *ingress.TestJig GCEController *gce.GCEIngressController CloudConfig framework.CloudConfig Logger ingress.TestLogger diff --git a/test/e2e/node/BUILD b/test/e2e/node/BUILD index 6053a44869..0b03a9115c 100644 --- a/test/e2e/node/BUILD +++ b/test/e2e/node/BUILD @@ -37,7 +37,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index b8ba1f8cad..d5e41404f8 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -18,6 +18,7 @@ package node import ( "crypto/tls" + "encoding/json" "fmt" "net/http" "regexp" @@ -30,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" @@ -74,7 +74,7 @@ var _ = SIGDescribe("Pods Extended", func() { }, } - By("setting up watch") + By("setting up selector") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) @@ -84,8 +84,6 @@ var _ = SIGDescribe("Pods Extended", func() { LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } - w, err := podClient.Watch(options) - Expect(err).NotTo(HaveOccurred(), "failed to set up watch") By("submitting the pod to kubernetes") podClient.Create(pod) @@ -97,16 +95,6 @@ var _ = SIGDescribe("Pods Extended", func() { Expect(err).NotTo(HaveOccurred(), "failed to query for pod") Expect(len(pods.Items)).To(Equal(1)) - By("verifying pod creation was observed") - select { - case event, _ := <-w.ResultChan(): - if event.Type != watch.Added { - framework.Failf("Failed to observe pod creation: %v", event) - } - case <-time.After(framework.PodStartTimeout): - framework.Failf("Timeout while waiting for pod creation") - } - // We need to wait for the pod to be running, otherwise the deletion // may be carried out immediately rather than gracefully. framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) @@ -143,10 +131,15 @@ var _ = SIGDescribe("Pods Extended", func() { By("deleting the pod gracefully") rsp, err := client.Do(req) Expect(err).NotTo(HaveOccurred(), "failed to use http client to send delete") + Expect(rsp.StatusCode).Should(Equal(http.StatusOK), "failed to delete gracefully by client request") + var lastPod v1.Pod + err = json.NewDecoder(rsp.Body).Decode(&lastPod) + Expect(err).NotTo(HaveOccurred(), "failed to decode graceful termination proxy response") defer rsp.Body.Close() By("verifying the kubelet observed the termination notice") + Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName) if err != nil { @@ -161,32 +154,12 @@ var _ = SIGDescribe("Pods Extended", func() { framework.Logf("deletion has not yet been observed") return false, nil } - return true, nil + return false, nil } framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed") return true, nil })).NotTo(HaveOccurred(), "kubelet never observed the termination notice") - By("verifying pod deletion was observed") - deleted := false - timeout := false - var lastPod *v1.Pod - timer := time.After(2 * time.Minute) - for !deleted && !timeout { - select { - case event, _ := <-w.ResultChan(): - if event.Type == watch.Deleted { - lastPod = event.Object.(*v1.Pod) - deleted = true - } - case <-timer: - timeout = true - } - } - if !deleted { - framework.Failf("Failed to observe pod deletion") - } - Expect(lastPod.DeletionTimestamp).ToNot(BeNil()) Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero()) diff --git a/test/e2e/servicecatalog/framework.go b/test/e2e/servicecatalog/framework.go index 9b6dc4b1ab..39af84e65b 100644 --- a/test/e2e/servicecatalog/framework.go +++ b/test/e2e/servicecatalog/framework.go @@ -18,6 +18,7 @@ package servicecatalog import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. 
func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-service-catalog] "+text, body) } diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index 260c1790a2..9a649f7257 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -30,8 +30,8 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -39,7 +39,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { f := framework.NewDefaultFramework("podpreset") var podClient *framework.PodClient - BeforeEach(func() { + ginkgo.BeforeEach(func() { // only run on gce for the time being til we find an easier way to update // the admission controllers used on the others framework.SkipUnlessProviderIs("gce") @@ -47,8 +47,8 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }) // Simplest case: all pods succeed promptly - It("should create a pod preset", func() { - By("Creating a pod preset") + ginkgo.It("should create a pod preset", func() { + ginkgo.By("Creating a pod preset") pip := &settings.PodPreset{ ObjectMeta: metav1.ObjectMeta{ @@ -77,9 +77,9 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { if errors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("creating the pod") + ginkgo.By("creating the pod") name := "pod-preset-pod" value := strconv.Itoa(time.Now().Nanosecond()) pod := &v1.Pod{ @@ -102,30 +102,30 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }, } - By("setting up watch") + ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } w, err := podClient.Watch(options) - Expect(err).NotTo(HaveOccurred(), "failed to set up watch") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch") - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient.Create(pod) - By("verifying the pod is in kubernetes") + ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) - By("verifying pod creation was observed") + ginkgo.By("verifying pod creation was observed") select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { @@ -139,10 +139,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // may be carried out immediately rather than gracefully. 
framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("ensuring pod is modified") + ginkgo.By("ensuring pod is modified") // save the running pod pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") // check the annotation is there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok { @@ -155,8 +155,8 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { } }) - It("should not modify the pod on conflict", func() { - By("Creating a pod preset") + ginkgo.It("should not modify the pod on conflict", func() { + ginkgo.By("Creating a pod preset") pip := &settings.PodPreset{ ObjectMeta: metav1.ObjectMeta{ @@ -185,9 +185,9 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { if errors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("creating the pod") + ginkgo.By("creating the pod") name := "pod-preset-pod" value := strconv.Itoa(time.Now().Nanosecond()) originalPod := &v1.Pod{ @@ -211,30 +211,30 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }, } - By("setting up watch") + ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } w, err := podClient.Watch(options) - Expect(err).NotTo(HaveOccurred(), "failed to set up watch") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch") - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient.Create(originalPod) - By("verifying the pod is in kubernetes") + ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) - By("verifying pod creation was observed") + ginkgo.By("verifying pod creation was observed") select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { @@ -248,10 +248,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // may be carried out immediately rather than gracefully. 
framework.ExpectNoError(f.WaitForPodRunning(originalPod.Name)) - By("ensuring pod is modified") + ginkgo.By("ensuring pod is modified") // save the running pod pod, err := podClient.Get(originalPod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") // check the annotation is not there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok { diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go index f1473324b2..d64bbbf761 100644 --- a/test/e2e/ui/dashboard.go +++ b/test/e2e/ui/dashboard.go @@ -28,12 +28,12 @@ import ( "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var _ = SIGDescribe("Kubernetes Dashboard", func() { - BeforeEach(func() { + ginkgo.BeforeEach(func() { // TODO(kubernetes/kubernetes#61559): Enable dashboard here rather than skip the test. framework.SkipIfProviderIs("gke") }) @@ -49,17 +49,17 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() { f := framework.NewDefaultFramework(uiServiceName) - It("should check that the kubernetes-dashboard instance is alive", func() { - By("Checking whether the kubernetes-dashboard service exists.") + ginkgo.It("should check that the kubernetes-dashboard instance is alive", func() { + ginkgo.By("Checking whether the kubernetes-dashboard service exists.") err := framework.WaitForService(f.ClientSet, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Checking to make sure the kubernetes-dashboard pods are running") + ginkgo.By("Checking to make sure the kubernetes-dashboard pods are running") selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName})) err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, uiNamespace, selector) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Checking to make sure we get a response from the kubernetes-dashboard.") + ginkgo.By("Checking to make sure we get a response from the kubernetes-dashboard.") err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) { var status int proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) @@ -90,6 +90,6 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() { // Don't return err here as it aborts polling. return status == http.StatusOK, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) diff --git a/test/e2e/ui/framework.go b/test/e2e/ui/framework.go index 4cf72e5aa1..930802fdcb 100644 --- a/test/e2e/ui/framework.go +++ b/test/e2e/ui/framework.go @@ -18,6 +18,7 @@ package ui import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. 
func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-ui] "+text, body) } diff --git a/test/e2e/upgrades/ingress.go b/test/e2e/upgrades/ingress.go index 75d6e14da4..35aa3af48b 100644 --- a/test/e2e/upgrades/ingress.go +++ b/test/e2e/upgrades/ingress.go @@ -44,7 +44,7 @@ type IngressUpgradeTest struct { gceController *gce.GCEIngressController // holds GCP resources pre-upgrade resourceStore *GCPResourceStore - jig *ingress.IngressTestJig + jig *ingress.TestJig httpClient *http.Client ip string ipName string diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index a8cf1c6b3c..6ada6c9ea2 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -60,7 +60,6 @@ go_library( "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/api/v1/node:go_default_library", "//pkg/util/procfs:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -71,6 +70,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//test/e2e/perftype:go_default_library", "//test/e2e_node/perftype:go_default_library", + "//test/utils:go_default_library", "//vendor/github.com/google/cadvisor/client/v2:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", @@ -114,7 +114,6 @@ go_test( embed = [":go_default_library"], tags = ["e2e"], deps = [ - "//pkg/api/v1/node:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet:go_default_library", @@ -157,6 +156,7 @@ go_test( "//test/e2e/framework/metrics:go_default_library", "//test/e2e_node/perf/workloads:go_default_library", "//test/e2e_node/services:go_default_library", + "//test/utils:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/coreos/go-systemd/util:go_default_library", @@ -175,7 +175,6 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//test/utils:go_default_library", "//vendor/github.com/kardianos/osext:go_default_library", "//vendor/github.com/onsi/ginkgo/config:go_default_library", "//vendor/github.com/onsi/ginkgo/reporters:go_default_library", diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index 41357cc407..623b7b1839 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -39,7 +39,6 @@ import ( utilyaml "k8s.io/apimachinery/pkg/util/yaml" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/cmd/kubeadm/app/util/system" - nodeutil "k8s.io/kubernetes/pkg/api/v1/node" commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e_node/services" @@ -234,7 +233,7 @@ func waitForNodeReady() { if err != nil { return fmt.Errorf("failed to get node: %v", err) } - if !nodeutil.IsNodeReady(node) { + if !isNodeReady(node) { return fmt.Errorf("node is not ready: %+v", node) } return nil @@ -310,3 +309,13 @@ func loadSystemSpecFromFile(filename string) (*system.SysSpec, error) { } return spec, nil } + +// isNodeReady returns true if a node is ready; false otherwise. 
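// Only the NodeReady condition is consulted: if that condition is absent from
// node.Status.Conditions the node is treated as not ready, and unrelated
// conditions such as MemoryPressure or DiskPressure are ignored.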
+func isNodeReady(node *v1.Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == v1.NodeReady { + return c.Status == v1.ConditionTrue + } + } + return false +} diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index da8444eb49..abbd22f845 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - nodeutil "k8s.io/kubernetes/pkg/api/v1/node" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/eviction" @@ -37,6 +36,7 @@ import ( kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" + testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" @@ -682,7 +682,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe // Returns TRUE if the node has the node condition, FALSE otherwise func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool { localNodeStatus := getLocalNode(f).Status - _, actualNodeCondition := nodeutil.GetNodeCondition(&localNodeStatus, expectedNodeCondition) + _, actualNodeCondition := testutils.GetNodeCondition(&localNodeStatus, expectedNodeCondition) Expect(actualNodeCondition).NotTo(BeNil()) return actualNodeCondition.Status == v1.ConditionTrue } diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 36a63193c9..e3119fcd1b 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -32,9 +32,9 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" coreclientset "k8s.io/client-go/kubernetes/typed/core/v1" - nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/test/e2e/framework" + testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -427,7 +427,7 @@ func verifyNodeCondition(n coreclientset.NodeInterface, condition v1.NodeConditi if err != nil { return err } - _, c := nodeutil.GetNodeCondition(&node.Status, condition) + _, c := testutils.GetNodeCondition(&node.Status, condition) if c == nil { return fmt.Errorf("node condition %q not found", condition) } diff --git a/test/images/webhook/BUILD b/test/images/webhook/BUILD index 17f9f04925..80a980253d 100644 --- a/test/images/webhook/BUILD +++ b/test/images/webhook/BUILD @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "addlabel.go", + "alwaysallow.go", "alwaysdeny.go", "config.go", "configmap.go", diff --git a/test/images/webhook/VERSION b/test/images/webhook/VERSION index fe9e4faa6b..42e1b6f225 100644 --- a/test/images/webhook/VERSION +++ b/test/images/webhook/VERSION @@ -1 +1 @@ -1.13v1 +1.14v1 diff --git a/test/images/webhook/alwaysallow.go b/test/images/webhook/alwaysallow.go new file mode 100644 index 0000000000..f9a4aa9a89 --- /dev/null +++ b/test/images/webhook/alwaysallow.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "time" + + "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +// alwaysAllowDelayFiveSeconds sleeps for five seconds and allows all requests made to this function. +func alwaysAllowDelayFiveSeconds(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { + klog.V(2).Info("always-allow-with-delay sleeping for 5 seconds") + time.Sleep(5 * time.Second) + klog.V(2).Info("calling always-allow") + reviewResponse := v1beta1.AdmissionResponse{} + reviewResponse.Allowed = true + reviewResponse.Result = &metav1.Status{Message: "this webhook allows all requests"} + return &reviewResponse +} diff --git a/test/images/webhook/main.go b/test/images/webhook/main.go index 0d9460ca8b..87484b99ea 100644 --- a/test/images/webhook/main.go +++ b/test/images/webhook/main.go @@ -91,6 +91,10 @@ func serve(w http.ResponseWriter, r *http.Request, admit admitFunc) { } } +func serveAlwaysAllowDelayFiveSeconds(w http.ResponseWriter, r *http.Request) { + serve(w, r, alwaysAllowDelayFiveSeconds) +} + func serveAlwaysDeny(w http.ResponseWriter, r *http.Request) { serve(w, r, alwaysDeny) } @@ -132,10 +136,12 @@ func serveCRD(w http.ResponseWriter, r *http.Request) { } func main() { + klog.InitFlags(nil) var config Config config.addFlags() flag.Parse() + http.HandleFunc("/always-allow-delay-5s", serveAlwaysAllowDelayFiveSeconds) http.HandleFunc("/always-deny", serveAlwaysDeny) http.HandleFunc("/add-label", serveAddLabel) http.HandleFunc("/pods", servePods) diff --git a/test/integration/apimachinery/watch_restart_test.go b/test/integration/apimachinery/watch_restart_test.go index 1bd288788a..2d723c9611 100644 --- a/test/integration/apimachinery/watch_restart_test.go +++ b/test/integration/apimachinery/watch_restart_test.go @@ -172,6 +172,21 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) { }, // regular watcher; unfortunately destined to fail normalizeOutputFunc: noopNormalization, }, + { + name: "RetryWatcher survives closed watches", + succeed: true, + secret: newTestSecret("secret-02"), + getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error, func()) { + lw := &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return getWatchFunc(c, secret)(options) + }, + } + w, err := watchtools.NewRetryWatcher(secret.ResourceVersion, lw) + return w, err, func() { <-w.Done() } + }, + normalizeOutputFunc: noopNormalization, + }, { name: "InformerWatcher survives closed watches", succeed: true, diff --git a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go index 0836af197b..d723cb5416 100644 --- a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go +++ b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go @@ -57,7 +57,7 @@ func TestAdmission(t *testing.T) { }, } - updatedPod, err := client.Core().Pods(pod.Namespace).Create(&pod) + updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(&pod) if err != nil { t.Fatalf("error 
creating pod: %v", err) } diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index 640e285da5..b667f45101 100644 --- a/test/integration/evictions/evictions_test.go +++ b/test/integration/evictions/evictions_test.go @@ -19,6 +19,7 @@ package evictions import ( "fmt" "net/http/httptest" + "reflect" "sync" "sync/atomic" "testing" @@ -37,7 +38,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller/disruption" "k8s.io/kubernetes/test/integration/framework" - "reflect" ) const ( @@ -89,7 +89,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { waitToObservePods(t, informers.Core().V1().Pods().Informer(), numOfEvictions, v1.PodRunning) pdb := newPDB() - if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { + if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { t.Errorf("Failed to create PodDisruptionBudget: %v", err) } @@ -107,7 +107,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { eviction := newEviction(ns.Name, podName, deleteOption) err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { - e := clientSet.Policy().Evictions(ns.Name).Evict(eviction) + e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { case errors.IsTooManyRequests(e): return false, nil @@ -151,7 +151,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { close(errCh) var errList []error - if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { + if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { errList = append(errList, fmt.Errorf("Failed to delete PodDisruptionBudget: %v", err)) } for err := range errCh { @@ -202,20 +202,20 @@ func TestTerminalPodEviction(t *testing.T) { waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1, v1.PodSucceeded) pdb := newPDB() - if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { + if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { t.Errorf("Failed to create PodDisruptionBudget: %v", err) } waitPDBStable(t, clientSet, 1, ns.Name, pdb.Name) - pdbList, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) + pdbList, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Error while listing pod disruption budget") } oldPdb := pdbList.Items[0] eviction := newEviction(ns.Name, pod.Name, deleteOption) err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { - e := clientSet.Policy().Evictions(ns.Name).Evict(eviction) + e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { case errors.IsTooManyRequests(e): return false, nil @@ -230,7 +230,7 @@ func TestTerminalPodEviction(t *testing.T) { if err != nil { t.Fatalf("Eviction of pod failed %v", err) } - pdbList, err = clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) + pdbList, err = clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Error while listing pod disruption budget") } @@ -240,7 +240,7 @@ func TestTerminalPodEviction(t *testing.T) { t.Fatalf("Expected the pdb generation to be of same value %v but got %v", newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration) } - if err := 
clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { + if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { t.Fatalf("Failed to delete pod disruption budget") } } @@ -364,7 +364,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) { if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) { - pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{}) + pdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/integration/metrics/metrics_test.go b/test/integration/metrics/metrics_test.go index 969dbe6088..34ed806c91 100644 --- a/test/integration/metrics/metrics_test.go +++ b/test/integration/metrics/metrics_test.go @@ -111,7 +111,7 @@ func TestApiserverMetrics(t *testing.T) { // Make a request to the apiserver to ensure there's at least one data point // for the metrics we're expecting -- otherwise, they won't be exported. client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - if _, err := client.Core().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil { + if _, err := client.CoreV1().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil { t.Fatalf("unexpected error getting pods: %v", err) } diff --git a/test/integration/objectmeta/objectmeta_test.go b/test/integration/objectmeta/objectmeta_test.go index 1da1ddaf5e..4e4ee32be3 100644 --- a/test/integration/objectmeta/objectmeta_test.go +++ b/test/integration/objectmeta/objectmeta_test.go @@ -41,12 +41,12 @@ func TestIgnoreClusterName(t *testing.T) { ClusterName: "cluster-name-to-ignore", }, } - nsNew, err := client.Core().Namespaces().Create(&ns) + nsNew, err := client.CoreV1().Namespaces().Create(&ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) - nsNew, err = client.Core().Namespaces().Update(&ns) + nsNew, err = client.CoreV1().Namespaces().Update(&ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) diff --git a/test/integration/pods/pods_test.go b/test/integration/pods/pods_test.go index 8504fbf008..21c4bdc0d3 100644 --- a/test/integration/pods/pods_test.go +++ b/test/integration/pods/pods_test.go @@ -129,13 +129,13 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) { pod.Spec.ActiveDeadlineSeconds = tc.original pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i) - if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } pod.Spec.ActiveDeadlineSeconds = tc.update - _, err := client.Core().Pods(ns.Name).Update(pod) + _, err := client.CoreV1().Pods(ns.Name).Update(pod) if tc.valid && err != nil { t.Errorf("%v: failed to update pod: %v", tc.name, err) } else if !tc.valid && err == nil { @@ -173,7 +173,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) { }, } - if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } diff --git 
a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index 9f6a9a418b..891b9dc9d4 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -103,7 +103,7 @@ func TestQuota(t *testing.T) { qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource) informersStarted := make(chan struct{}) resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{ - QuotaClient: clientset.Core(), + QuotaClient: clientset.CoreV1(), ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(), ResyncPeriod: controller.NoResyncPeriodFunc, InformerFactory: informers, @@ -151,12 +151,12 @@ func TestQuota(t *testing.T) { } func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) { - w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) + w, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil { + if _, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Create(quota); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -210,12 +210,12 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { }, } - w, err := clientset.Core().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) + w, err := clientset.CoreV1().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil { + if _, err := clientset.CoreV1().ReplicationControllers(namespace).Create(rc); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -239,7 +239,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { return false, nil }) if err != nil { - pods, _ := clientset.Core().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) + pods, _ := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items)) } } @@ -301,7 +301,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource) informersStarted := make(chan struct{}) resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{ - QuotaClient: clientset.Core(), + QuotaClient: clientset.CoreV1(), ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(), ResyncPeriod: controller.NoResyncPeriodFunc, InformerFactory: informers, @@ -339,7 +339,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { }, }, } - if _, err := clientset.Core().Pods(ns.Name).Create(pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(pod); err == nil { t.Fatalf("expected error for insufficient quota") } @@ -362,7 +362,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { // attempt to create a new pod once the quota is propagated err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { // retry until we succeed (to allow time for all changes to 
propagate) - if _, err := clientset.Core().Pods(ns.Name).Create(pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(pod); err == nil { return true, nil } return false, nil diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index fdf8cbdfa3..0baf1a7a89 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -72,7 +72,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { ns := "test-service-account-creation" // Create namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatalf("could not create namespace: %v", err) } @@ -84,7 +84,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { } // Delete service account - err = c.Core().ServiceAccounts(ns).Delete(defaultUser.Name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(defaultUser.Name, nil) if err != nil { t.Fatalf("Could not delete default serviceaccount: %v", err) } @@ -110,13 +110,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { name := "my-service-account" // Create namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatalf("could not create namespace: %v", err) } // Create service account - serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}}) + serviceAccount, err := c.CoreV1().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -128,7 +128,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete token - err = c.Core().Secrets(ns).Delete(token1Name, nil) + err = c.CoreV1().Secrets(ns).Delete(token1Name, nil) if err != nil { t.Fatalf("Could not delete token: %v", err) } @@ -146,12 +146,12 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Trigger creation of a new referenced token - serviceAccount, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + serviceAccount, err = c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } serviceAccount.Secrets = []v1.ObjectReference{} - _, err = c.Core().ServiceAccounts(ns).Update(serviceAccount) + _, err = c.CoreV1().ServiceAccounts(ns).Update(serviceAccount) if err != nil { t.Fatal(err) } @@ -169,7 +169,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete service account - err = c.Core().ServiceAccounts(ns).Delete(name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(name, nil) if err != nil { t.Fatal(err) } @@ -178,7 +178,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name) err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { // Get all secrets in the namespace - secrets, err := c.Core().Secrets(ns).List(metav1.ListOptions{}) + secrets, err := c.CoreV1().Secrets(ns).List(metav1.ListOptions{}) // Retrieval errors should fail if err != nil { return false, err @@ -207,7 +207,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { ns := "auto-mount-ns" // Create "my" namespace - _, err = 
c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -261,7 +261,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { } expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts - createdPod, err := c.Core().Pods(ns).Create(&protoPod) + createdPod, err := c.CoreV1().Pods(ns).Create(&protoPod) if err != nil { t.Fatal(err) } @@ -290,19 +290,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { otherns := "other-ns" // Create "my" namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "other" namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "ro" user in myns - _, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -315,13 +315,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { roClient := clientset.NewForConfigOrDie(&roClientConfig) doServiceAccountAPIRequests(t, roClient, myns, true, true, false) doServiceAccountAPIRequests(t, roClient, otherns, true, false, false) - err = c.Core().Secrets(myns).Delete(roTokenName, nil) + err = c.CoreV1().Secrets(myns).Delete(roTokenName, nil) if err != nil { t.Fatalf("could not delete token: %v", err) } // wait for delete to be observed and reacted to via watch wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - sa, err := c.Core().ServiceAccounts(myns).Get(readOnlyServiceAccountName, metav1.GetOptions{}) + sa, err := c.CoreV1().ServiceAccounts(myns).Get(readOnlyServiceAccountName, metav1.GetOptions{}) if err != nil { return false, err } @@ -335,7 +335,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { doServiceAccountAPIRequests(t, roClient, myns, false, false, false) // Create "rw" user in myns - _, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -489,13 +489,13 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) { if !shouldWait { - return c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + return c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) } var user *v1.ServiceAccount var err error err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { - user, err = 
c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + user, err = c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if errors.IsNotFound(err) { return false, nil } @@ -512,7 +512,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st token := "" findToken := func() (bool, error) { - user, err := c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + user, err := c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if errors.IsNotFound(err) { return false, nil } @@ -521,7 +521,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st } for _, ref := range user.Secrets { - secret, err := c.Core().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) + secret, err := c.CoreV1().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) if errors.IsNotFound(err) { continue } @@ -571,17 +571,17 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string readOps := []testOperation{ func() error { - _, err := c.Core().Secrets(ns).List(metav1.ListOptions{}) + _, err := c.CoreV1().Secrets(ns).List(metav1.ListOptions{}) return err }, func() error { - _, err := c.Core().Pods(ns).List(metav1.ListOptions{}) + _, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) return err }, } writeOps := []testOperation{ - func() error { _, err := c.Core().Secrets(ns).Create(testSecret); return err }, - func() error { return c.Core().Secrets(ns).Delete(testSecret.Name, nil) }, + func() error { _, err := c.CoreV1().Secrets(ns).Create(testSecret); return err }, + func() error { return c.CoreV1().Secrets(ns).Delete(testSecret.Name, nil) }, } for _, op := range readOps { diff --git a/test/integration/statefulset/statefulset_test.go b/test/integration/statefulset/statefulset_test.go index 191d258a3e..427ac5a400 100644 --- a/test/integration/statefulset/statefulset_test.go +++ b/test/integration/statefulset/statefulset_test.go @@ -85,7 +85,7 @@ func TestDeletingAndFailedPods(t *testing.T) { waitSTSStable(t, c, sts) // Verify STS creates 2 pods - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap) if len(pods.Items) != 2 { t.Fatalf("len(pods) = %d, want 2", len(pods.Items)) @@ -97,7 +97,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.Core().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("error deleting pod %s: %v", deletingPod.Name, err) } diff --git a/test/integration/statefulset/util.go b/test/integration/statefulset/util.go index 447ee759c6..0accec2130 100644 --- a/test/integration/statefulset/util.go +++ b/test/integration/statefulset/util.go @@ -199,7 +199,7 @@ func runControllerAndInformers(sc *statefulset.StatefulSetController, informers } func createHeadlessService(t *testing.T, clientSet clientset.Interface, headlessService *v1.Service) { - _, err := clientSet.Core().Services(headlessService.Namespace).Create(headlessService) + _, err := clientSet.CoreV1().Services(headlessService.Namespace).Create(headlessService) if err != nil { t.Fatalf("failed creating headless service: %v", err) } @@ -216,7 +216,7 @@ func createSTSsPods(t *testing.T, clientSet clientset.Interface, stss []*appsv1. 
createdSTSs = append(createdSTSs, createdSTS) } for _, pod := range pods { - createdPod, err := clientSet.Core().Pods(pod.Namespace).Create(pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(pod) if err != nil { t.Fatalf("failed to create pod %s: %v", pod.Name, err) } diff --git a/test/integration/ttlcontroller/ttlcontroller_test.go b/test/integration/ttlcontroller/ttlcontroller_test.go index c0c2c27d6a..b011a43823 100644 --- a/test/integration/ttlcontroller/ttlcontroller_test.go +++ b/test/integration/ttlcontroller/ttlcontroller_test.go @@ -59,7 +59,7 @@ func createNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex Name: fmt.Sprintf("node-%d", idx), }, } - if _, err := client.Core().Nodes().Create(node); err != nil { + if _, err := client.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to create node: %v", err) } }(i) @@ -74,7 +74,7 @@ func deleteNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex go func(idx int) { defer wg.Done() name := fmt.Sprintf("node-%d", idx) - if err := client.Core().Nodes().Delete(name, &metav1.DeleteOptions{}); err != nil { + if err := client.CoreV1().Nodes().Delete(name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete node: %v", err) } }(i) diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index c737c87c6f..9b908ae21b 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -162,13 +162,13 @@ func TestPodDeletionWithDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -229,13 +229,13 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -264,7 +264,7 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.Core().Pods(ns.Name).UpdateStatus(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -297,13 +297,13 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create 
pod : %v", err) } @@ -332,7 +332,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.Core().Pods(ns.Name).UpdateStatus(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -474,13 +474,13 @@ func TestPodAddedByDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -549,7 +549,7 @@ func TestPVCBoundWithADC(t *testing.T) { }, }, } - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } @@ -557,10 +557,10 @@ func TestPVCBoundWithADC(t *testing.T) { pvcs := []*v1.PersistentVolumeClaim{} for i := 0; i < 3; i++ { pod, pvc := fakePodWithPVC(fmt.Sprintf("fakepod-pvcnotbound-%d", i), fmt.Sprintf("fakepvc-%d", i), namespaceName) - if _, err := testClient.Core().Pods(pod.Namespace).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(pod.Namespace).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } - if _, err := testClient.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc); err != nil { + if _, err := testClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc); err != nil { t.Errorf("Failed to create pvc : %v", err) } pvcs = append(pvcs, pvc) @@ -568,7 +568,7 @@ func TestPVCBoundWithADC(t *testing.T) { // pod with no pvc podNew := fakePodWithVol(namespaceName) podNew.SetName("fakepod") - if _, err := testClient.Core().Pods(podNew.Namespace).Create(podNew); err != nil { + if _, err := testClient.CoreV1().Pods(podNew.Namespace).Create(podNew); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -608,7 +608,7 @@ func createPVForPVC(t *testing.T, testClient *clientset.Clientset, pvc *v1.Persi StorageClassName: *pvc.Spec.StorageClassName, }, } - if _, err := testClient.Core().PersistentVolumes().Create(pv); err != nil { + if _, err := testClient.CoreV1().PersistentVolumes().Create(pv); err != nil { t.Errorf("Failed to create pv : %v", err) } } diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 63b5702039..ba1103634b 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -116,7 +116,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -171,7 +171,7 @@ func TestPersistentVolumeDeleter(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -231,7 +231,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -301,7 +301,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -382,7 +382,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -482,7 +482,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -572,7 +572,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) controllerStopCh := make(chan struct{}) informers.Start(controllerStopCh) @@ -862,7 +862,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes and StorageClasses). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) defer testClient.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) storageClass := storage.StorageClass{ @@ -957,7 +957,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
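// Note on the teardown below: DeleteCollection(nil, metav1.ListOptions{})
// deletes every PersistentVolume in the cluster, since the nil
// *metav1.DeleteOptions means server-side defaults and the empty ListOptions
// matches all objects. That cluster-wide effect is why these tests cannot run
// in parallel.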
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -1025,7 +1025,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) { // Check if the volume is already in requested phase - volume, err := client.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + volume, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil && volume.Status.Phase == phase { return } @@ -1046,7 +1046,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) { // Check if the claim is already in requested phase - claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) if err == nil && claim.Status.Phase == phase { return } diff --git a/test/utils/BUILD b/test/utils/BUILD index 08f839e691..d2707e1609 100644 --- a/test/utils/BUILD +++ b/test/utils/BUILD @@ -15,6 +15,7 @@ go_library( "delete_resources.go", "density_utils.go", "deployment.go", + "node.go", "paths.go", "pod_store.go", "replicaset.go", diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 266945a4bb..a4ac5dfcd3 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -93,7 +93,7 @@ var ( // Preconfigured image configs var ( CRDConversionWebhook = Config{e2eRegistry, "crd-conversion-webhook", "1.13rev2"} - AdmissionWebhook = Config{e2eRegistry, "webhook", "1.13v1"} + AdmissionWebhook = Config{e2eRegistry, "webhook", "1.14v1"} APIServer = Config{e2eRegistry, "sample-apiserver", "1.10"} AppArmorLoader = Config{e2eRegistry, "apparmor-loader", "1.0"} BusyBox = Config{dockerLibraryRegistry, "busybox", "1.29"} diff --git a/pkg/api/v1/node/util.go b/test/utils/node.go similarity index 73% rename from pkg/api/v1/node/util.go rename to test/utils/node.go index c82ac87e51..9388251acd 100644 --- a/pkg/api/v1/node/util.go +++ b/test/utils/node.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// TODO: merge with pkg/util/node +package utils -package node - -import ( - "k8s.io/api/core/v1" -) +import "k8s.io/api/core/v1" // GetNodeCondition extracts the provided condition from the given status and returns that. // Returns nil and -1 if the condition is not present, and the index of the located condition. @@ -35,13 +31,3 @@ func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) } return -1, nil } - -// IsNodeReady returns true if a node is ready; false otherwise. 
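// (The removed IsNodeReady helper is not simply deleted: an unexported copy,
// isNodeReady, is added to test/e2e_node/e2e_node_suite_test.go for use by
// waitForNodeReady, while GetNodeCondition above moves to test/utils.)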
-func IsNodeReady(node *v1.Node) bool { - for _, c := range node.Status.Conditions { - if c.Type == v1.NodeReady { - return c.Status == v1.ConditionTrue - } - } - return false -} diff --git a/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo b/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo index 9561e77ba2..84ce755d3a 100644 Binary files a/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo and b/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo differ diff --git a/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po b/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po index 2cf20b8df2..f779f2cf7e 100644 --- a/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po +++ b/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po @@ -6,87 +6,3308 @@ msgid "" msgstr "" "Project-Id-Version: gettext-go-examples-hello\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2013-12-12 20:03+0000\n" -"PO-Revision-Date: 2017-01-29 22:54-0800\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2017-03-14 21:32-0700\n" +"PO-Revision-Date: 2019-02-14 10:33+0900\n" "Last-Translator: Giri Kuncoro \n" +"Language-Team: \n" +"Language: ja\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Generator: Poedit 1.6.10\n" +"X-Generator: Poedit 2.1.1\n" "X-Poedit-SourceCharset: UTF-8\n" -"Language-Team: \n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Language: ja\n" + +#: pkg/kubectl/cmd/create_clusterrolebinding.go:35 +msgid "" +"\n" +"\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the " +"cluster-admin ClusterRole\n" +"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-" +"admin --user=user1 --user=user2 --group=group1" +msgstr "" +"\n" +"\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the " +"cluster-admin ClusterRole\n" +"\t\t kubectl create clusterrolebinding cluster-admin —clusterrole=cluster-" +"admin —user=user1 —user=user2 —group=group1" + +#: pkg/kubectl/cmd/create_rolebinding.go:35 +msgid "" +"\n" +"\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +"ClusterRole\n" +"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1" +msgstr "" +"\n" +"\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +"ClusterRole\n" +"\t\t kubectl create rolebinding admin —clusterrole=admin —user=user1 —" +"user=user2 —group=group1" + +#: pkg/kubectl/cmd/create_configmap.go:44 +msgid "" +"\n" +"\t\t # Create a new configmap named my-config based on folder bar\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new configmap named my-config with specified keys instead " +"of file basenames on disk\n" +"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1." +"txt --from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new configmap named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-" +"literal=key2=config2" +msgstr "" +"\n" +"\t\t # Create a new configmap named my-config based on folder bar\n" +"\t\t kubectl create configmap my-config —from-file=path/to/bar\n" +"\n" +"\t\t # Create a new configmap named my-config with specified keys instead " +"of file basenames on disk\n" +"\t\t kubectl create configmap my-config —from-file=key1=/path/to/bar/file1." 
+"txt —from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new configmap named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config —from-literal=key1=config1 —from-" +"literal=key2=config2" + +#: pkg/kubectl/cmd/create_secret.go:135 +msgid "" +"\n" +"\t\t # If you don't already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +msgstr "" +"\n" +"\t\t # If you don’t already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret —docker-" +"server=DOCKER_REGISTRY_SERVER —docker-username=DOCKER_USER —docker-" +"password=DOCKER_PASSWORD —docker-email=DOCKER_EMAIL" + +#: pkg/kubectl/cmd/top_node.go:65 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" + +#: pkg/kubectl/cmd/apply.go:84 +msgid "" +"\n" +"\t\t# Apply the configuration in pod.json to a pod.\n" +"\t\tkubectl apply -f ./pod.json\n" +"\n" +"\t\t# Apply the JSON passed into stdin to a pod.\n" +"\t\tcat pod.json | kubectl apply -f -\n" +"\n" +"\t\t# Note: --prune is still in Alpha\n" +"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx " +"and delete all the other resources that are not in the file and match label " +"app=nginx.\n" +"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +"\n" +"\t\t# Apply the configuration in manifest.yaml and delete all the other " +"configmaps that are not in the file.\n" +"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +"ConfigMap" +msgstr "" +"\n" +"\t\t# Apply the configuration in pod.json to a pod.\n" +"\t\tkubectl apply -f ./pod.json\n" +"\n" +"\t\t# Apply the JSON passed into stdin to a pod.\n" +"\t\tcat pod.json | kubectl apply -f -\n" +"\n" +"\t\t# Note: —prune is still in Alpha\n" +"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx " +"and delete all the other resources that are not in the file and match label " +"app=nginx.\n" +"\t\tkubectl apply —prune -f manifest.yaml -l app=nginx\n" +"\n" +"\t\t# Apply the configuration in manifest.yaml and delete all the other " +"configmaps that are not in the file.\n" +"\t\tkubectl apply —prune -f manifest.yaml —all —prune-whitelist=core/v1/" +"ConfigMap" + +#: pkg/kubectl/cmd/autoscale.go:40 +#, c-format +msgid "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used:\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%:\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +msgstr "" +"\n" +"\t\t# Auto scale a deployment “foo”, with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used:\n" +"\t\tkubectl autoscale deployment foo —min=2 —max=10\n" +"\n" +"\t\t# Auto scale a replication 
controller “foo”, with the number of pods " +"between 1 and 5, target CPU utilization at 80%:\n" +"\t\tkubectl autoscale rc foo —max=5 —cpu-percent=80" + +#: pkg/kubectl/cmd/convert.go:49 +msgid "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in json format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . | kubectl create -f -" +msgstr "" +"\n" +"\t\t# Convert ‘pod.yaml’ to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by ‘pod.yaml’ to the " +"latest version\n" +"\t\t# and print to stdout in json format.\n" +"\t\tkubectl convert -f pod.yaml —local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . | kubectl create -f -" + +#: pkg/kubectl/cmd/create_clusterrole.go:34 +msgid "" +"\n" +"\t\t# Create a ClusterRole named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods --resource-name=readablepod" +msgstr "" +"\n" +"\t\t# Create a ClusterRole named “pod-reader” that allows user to perform " +"“get”, “watch” and “list” on pods\n" +"\t\tkubectl create clusterrole pod-reader —verb=get,list,watch —" +"resource=pods\n" +"\n" +"\t\t# Create a ClusterRole named “pod-reader” with ResourceName specified\n" +"\t\tkubectl create clusterrole pod-reader —verb=get,list,watch —" +"resource=pods —resource-name=readablepod" + +#: pkg/kubectl/cmd/create_role.go:41 +msgid "" +"\n" +"\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +"\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +"resource=pods --resource-name=readablepod" +msgstr "" +"\n" +"\t\t# Create a Role named “pod-reader” that allows user to perform “get”, " +"“watch” and “list” on pods\n" +"\t\tkubectl create role pod-reader —verb=get —verb=list —verb=watch —" +"resource=pods\n" +"\n" +"\t\t# Create a Role named “pod-reader” with ResourceName specified\n" +"\t\tkubectl create role pod-reader —verb=get —verg=list —verb=watch —" +"resource=pods —resource-name=readablepod" + +#: pkg/kubectl/cmd/create_quota.go:35 +msgid "" +"\n" +"\t\t# Create a new resourcequota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resourcequota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +msgstr "" +"\n" +"\t\t# Create a new resourcequota named my-quota\n" +"\t\tkubectl create quota my-quota —hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," 
+"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resourcequota named best-effort\n" +"\t\tkubectl create quota best-effort —hard=pods=100 —scopes=BestEffort" + +#: pkg/kubectl/cmd/create_pdb.go:35 +#, c-format +msgid "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in " +"time.\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time.\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +msgstr "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in " +"time.\n" +"\t\tkubectl create poddisruptionbudget my-pdb —selector=app=rails —min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time.\n" +"\t\tkubectl create pdb my-pdb —selector=app=nginx —min-available=50%" + +#: pkg/kubectl/cmd/create.go:47 +msgid "" +"\n" +"\t\t# Create a pod using the data in pod.json.\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin.\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API format " +"then create the resource using the edited data.\n" +"\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o json" +msgstr "" +"\n" +"\t\t# Create a pod using the data in pod.json.\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin.\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API format " +"then create the resource using the edited data.\n" +"\t\tkubectl create -f docker-registry.yaml —edit —output-version=v1 -o json" + +#: pkg/kubectl/cmd/expose.go:53 +msgid "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, 
which " +"serves on port 80 and connects to the containers on port 8000.\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +msgstr "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod —port=444 —name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name “nginx-https”\n" +"\t\tkubectl expose service nginx —port=443 —target-port=8443 —name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named ‘video-stream’.\n" +"\t\tkubectl expose rc streamer —port=4100 —protocol=udp —name=video-stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000.\n" +"\t\tkubectl expose rs nginx —port=80 —target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000.\n" +"\t\tkubectl expose deployment nginx —port=80 —target-port=8000" + +#: pkg/kubectl/cmd/delete.go:68 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json.\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into " +"stdin.\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel.\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --grace-period=0 --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json.\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into " +"stdin.\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names “baz” and “foo”\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel.\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo —now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo —grace-period=0 —force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods —all" + +#: pkg/kubectl/cmd/describe.go:54 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes 
kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name).\n" +"\t\tkubectl describe pods frontend" +msgstr "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in “pod.json”\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the ‘frontend’ replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name).\n" +"\t\tkubectl describe pods frontend" + +#: pkg/kubectl/cmd/drain.go:165 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +"\t\t$ kubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use a " +"grace period of 15 minutes.\n" +"\t\t$ kubectl drain foo --grace-period=900" +msgstr "" +"\n" +"\t\t# Drain node “foo”, even if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +"\t\t$ kubectl drain foo —force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a " +"ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use a " +"grace period of 15 minutes.\n" +"\t\t$ kubectl drain foo —grace-period=900" + +#: pkg/kubectl/cmd/edit.go:80 +msgid "" +"\n" +"\t\t# Edit the service named 'docker-registry':\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation:\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +msgstr "" +"\n" +"\t\t# Edit the service named ‘docker-registry’:\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=“nano” kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job ‘myjob’ in JSON using the v1 API format:\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment ‘mydeployment’ in YAML and save the modified " +"config in its annotation:\n" +"\t\tkubectl edit deployment/mydeployment -o yaml —save-config" + +#: pkg/kubectl/cmd/exec.go:41 +msgid "" +"\n" +"\t\t# Get output from running 'date' from pod 123456-7890, using the first " +"container by default\n" +"\t\tkubectl exec 123456-7890 date\n" +"\n" +"\t\t# Get output from running 'date' in ruby-container from pod 123456-7890\n" +"\t\tkubectl exec 123456-7890 -c 
ruby-container date\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +msgstr "" +"\n" +"\t\t# Get output from running ‘date’ from pod 123456-7890, using the first " +"container by default\n" +"\t\tkubectl exec 123456-7890 date\n" +"\n" +"\t\t# Get output from running ‘date’ in ruby-container from pod 123456-7890\n" +"\t\tkubectl exec 123456-7890 -c ruby-container date\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to ‘bash’ in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from ‘bash’ back to the client\n" +"\t\tkubectl exec 123456-7890 -c ruby-container -i -t — bash -il" + +#: pkg/kubectl/cmd/attach.go:42 +msgid "" +"\n" +"\t\t# Get output from running pod 123456-7890, using the first container by " +"default\n" +"\t\tkubectl attach 123456-7890\n" +"\n" +"\t\t# Get output from ruby-container from pod 123456-7890\n" +"\t\tkubectl attach 123456-7890 -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running pod 123456-7890, using the first container by " +"default\n" +"\t\tkubectl attach 123456-7890\n" +"\n" +"\t\t# Get output from ruby-container from pod 123456-7890\n" +"\t\tkubectl attach 123456-7890 -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode, sends stdin to ‘bash’ in ruby-container " +"from pod 123456-7890\n" +"\t\t# and sends stdout/stderr from ‘bash’ back to the client\n" +"\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" + +#: pkg/kubectl/cmd/explain.go:39 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: pkg/kubectl/cmd/completion.go:65 +msgid "" +"\n" +"\t\t# Install bash completion on a Mac using homebrew\n" +"\t\tbrew install bash-completion\n" +"\t\tprintf \"\n" +"# Bash completion support\n" +"source $(brew --prefix)/etc/bash_completion\n" +"\" >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for bash into the current shell\n" +"\t\tsource <(kubectl completion bash)\n" +"\n" +"\t\t# Write bash completion code to a file and source if from .bash_profile\n" +"\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\tprintf \"\n" +"# Kubectl shell completion\n" +"source '$HOME/.kube/completion.bash.inc'\n" +"\" >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\tsource <(kubectl completion zsh)" +msgstr "" +"\n" +"\t\t# Install bash completion on a Mac using homebrew\n" +"\t\tbrew 
install bash-completion\n" +"\t\tprintf \"\n" +"# Bash completion support\n" +"source $(brew --prefix)/etc/bash_completion\n" +"\" >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for bash into the current shell\n" +"\t\tsource <(kubectl completion bash)\n" +"\n" +"\t\t# Write bash completion code to a file and source if from .bash_profile\n" +"\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\tprintf “\n" +"# Kubectl shell completion\n" +"source ‘$HOME/.kube/completion.bash.inc’\n" +"“ >> $HOME/.bash_profile\n" +"\t\tsource $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\tsource <(kubectl completion zsh)" + +#: pkg/kubectl/cmd/get.go:64 +msgid "" +"\n" +"\t\t# List all pods in ps output format.\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name).\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format.\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List a single pod in JSON output format.\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format.\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# Return only the phase value of the specified pod.\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format.\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names.\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +"\n" +"\t\t# List all resources with different types.\n" +"\t\tkubectl get all" +msgstr "" +"\n" +"\t\t# List all pods in ps output format.\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name).\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format.\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List a single pod in JSON output format.\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in “pod.yaml” in JSON " +"output format.\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# Return only the phase value of the specified pod.\n" +"\t\tkubectl get -o template pod/web-pod-13je7 —template={{.status.phase}}\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format.\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names.\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +"\n" +"\t\t# List all resources with different types.\n" +"\t\tkubectl get all" + +#: pkg/kubectl/cmd/portforward.go:53 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward mypod 5000 6000\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod 8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod :5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" 
+"\t\tkubectl port-forward mypod 0:5000" +msgstr "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward mypod 5000 6000\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod 8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod :5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward mypod 0:5000" + +#: pkg/kubectl/cmd/drain.go:118 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable.\n" +"\t\t$ kubectl uncordon foo" +msgstr "" +"\n" +"\t\t# Mark node “foo” as schedulable.\n" +"\t\t$ kubectl uncordon foo" + +#: pkg/kubectl/cmd/drain.go:93 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable.\n" +"\t\tkubectl cordon foo" +msgstr "" +"\n" +"\t\t# Mark node “foo” as unschedulable.\n" +"\t\tkubectl cordon foo" + +#: pkg/kubectl/cmd/patch.go:66 +msgid "" +"\n" +"\t\t# Partially update a node using strategic merge patch\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a json patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +msgstr "" +"\n" +"\t\t# Partially update a node using strategic merge patch\n" +"\t\tkubectl patch node k8s-node-1 -p ‘{“spec”:{“unschedulable”:true}}’\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"“node.json” using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p ‘{“spec”:{“unschedulable”:true}}’\n" +"\n" +"\t\t# Update a container’s image; spec.containers[*].name is required " +"because it’s a merge key\n" +"\t\tkubectl patch pod valid-pod -p ‘{“spec”:{“containers”:" +"[{“name”:”kubernetes-serve-hostname”,”image”:”new image”}]}}’\n" +"\n" +"\t\t# Update a container’s image using a json patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod —type=‘json’ -p=‘[{“op”: “replace”, “path”: " +"“/spec/containers/0/image”, “value”:”new image”}]’" + +#: pkg/kubectl/cmd/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +#: pkg/kubectl/cmd/clusterinfo.go:41 +msgid "" +"\n" +"\t\t# Print the address of the master and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" +"\n" +"\t\t# Print the address of the master and cluster services\n" +"\t\tkubectl cluster-info" + +#: pkg/kubectl/cmd/version.go:32 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" + +#: pkg/kubectl/cmd/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl 
api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: pkg/kubectl/cmd/replace.go:50 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json.\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin.\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" +"\n" +"\t\t# Replace a pod using the data in pod.json.\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin.\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod’s image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed ’s/\\(image: myimage\\):.*$/:v4/‘ | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace —force -f ./pod.json" + +#: pkg/kubectl/cmd/logs.go:40 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +"\t\tkubectl logs -lapp=nginx\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +"\t\tkubectl logs -lapp=nginx\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs —tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs —since=1h nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" + +#: pkg/kubectl/cmd/proxy.go:53 +msgid "" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +"\t\t# The chosen port for the server will be output to stdout.\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to kubernetes 
apiserver, changing the api prefix to k8s-" +"api\n" +"\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" +msgstr "" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy —port=8011 —www=./local/www/\n" +"\n" +"\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +"\t\t# The chosen port for the server will be output to stdout.\n" +"\t\tkubectl proxy —port=0\n" +"\n" +"\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +"api\n" +"\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy —api-prefix=/k8s-api" + +#: pkg/kubectl/cmd/scale.go:43 +msgid "" +"\n" +"\t\t# Scale a replicaset named 'foo' to 3.\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3.\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3.\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers.\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale job named 'cron' to 3.\n" +"\t\tkubectl scale --replicas=3 job/cron" +msgstr "" +"\n" +"\t\t# Scale a replicaset named ‘foo’ to 3.\n" +"\t\tkubectl scale —replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in “foo.yaml” " +"to 3.\n" +"\t\tkubectl scale —replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql’s current size is 2, scale mysql to 3.\n" +"\t\tkubectl scale —current-replicas=2 —replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers.\n" +"\t\tkubectl scale —replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale job named ‘cron’ to 3.\n" +"\t\tkubectl scale —replicas=3 job/cron" + +#: pkg/kubectl/cmd/apply_set_last_applied.go:67 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory.\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file, will create the annotation if it does not already exist.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory.\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file, will create the annotation if it does not already exist.\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml —create-annotation=true\n" +"\t\t" + +#: pkg/kubectl/cmd/top_pod.go:61 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME 
--containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod —namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME —containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/stop.go:40 +msgid "" +"\n" +"\t\t# Shut down foo.\n" +"\t\tkubectl stop replicationcontroller foo\n" +"\n" +"\t\t# Stop pods and services with label name=myLabel.\n" +"\t\tkubectl stop pods,services -l name=myLabel\n" +"\n" +"\t\t# Shut down the service defined in service.json\n" +"\t\tkubectl stop -f service.json\n" +"\n" +"\t\t# Shut down all resources in the path/to/resources directory\n" +"\t\tkubectl stop -f path/to/resources" +msgstr "" +"\n" +"\t\t# Shut down foo.\n" +"\t\tkubectl stop replicationcontroller foo\n" +"\n" +"\t\t# Stop pods and services with label name=myLabel.\n" +"\t\tkubectl stop pods,services -l name=myLabel\n" +"\n" +"\t\t# Shut down the service defined in service.json\n" +"\t\tkubectl stop -f service.json\n" +"\n" +"\t\t# Shut down all resources in the path/to/resources directory\n" +"\t\tkubectl stop -f path/to/resources" + +#: pkg/kubectl/cmd/run.go:57 +msgid "" +"\n" +"\t\t# Start a single instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a single instance of hazelcast and let the container expose port " +"5701 .\n" +"\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +"\n" +"\t\t# Start a single instance of hazelcast and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +"\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +"env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a replicated instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx --replicas=5\n" +"\n" +"\t\t# Dry run. Print the corresponding API objects without creating them.\n" +"\t\tkubectl run nginx --image=nginx --dry-run\n" +"\n" +"\t\t# Start a single instance of nginx, but overload the spec of the " +"deployment with a partial set of values parsed from JSON.\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a pod of busybox and keep it in the foreground, don't restart it " +"if it exits.\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx container using the default command, but use custom " +"arguments (arg1 .. argN) for that command.\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx container using a different command and custom " +"arguments.\n" +"\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +"\n" +"\t\t# Start the perl container to compute π to 2000 places and print it " +"out.\n" +"\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +"wle 'print bpi(2000)'\n" +"\n" +"\t\t# Start the cron job to compute π to 2000 places and print it out every " +"5 minutes.\n" +"\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +"restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +msgstr "" +"\n" +"\t\t# Start a single instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a single instance of hazelcast and let the container expose port " +"5701 .\n" +"\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +"\n" +"\t\t# Start a single instance of hazelcast and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +"\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +"env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a replicated instance of nginx.\n" +"\t\tkubectl run nginx --image=nginx --replicas=5\n" +"\n" +"\t\t# Dry run. Print the corresponding API objects without creating them.\n" +"\t\tkubectl run nginx --image=nginx --dry-run\n" +"\n" +"\t\t# Start a single instance of nginx, but overload the spec of the " +"deployment with a partial set of values parsed from JSON.\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a pod of busybox and keep it in the foreground, don't restart it " +"if it exits.\n" +"\t\tkubectl run -i -t busybox —image=busybox —restart=Never\n" +"\n" +"\t\t# Start the nginx container using the default command, but use custom " +"arguments (arg1 .. argN) for that command.\n" +"\t\tkubectl run nginx —image=nginx — \n" +"\n" +"\t\t# Start the nginx container using a different command and custom " +"arguments.\n" +"\t\tkubectl run nginx —image=nginx —command — \n" +"\n" +"\t\t# Start the perl container to compute π to 2000 places and print it " +"out.\n" +"\t\tkubectl run pi —image=perl —restart=OnFailure — perl -Mbignum=bpi -wle " +"‘print bpi(2000)’\n" +"\n" +"\t\t# Start the cron job to compute π to 2000 places and print it out every " +"5 minutes.\n" +"\t\tkubectl run pi —schedule=“0/5 * * * ?” —image=perl —restart=OnFailure — " +"perl -Mbignum=bpi -wle ‘print bpi(2000)’" + +#: pkg/kubectl/cmd/taint.go:67 +msgid "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'.\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified.\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists.\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-" +msgstr "" +"\n" +"\t\t# Update node ‘foo’ with a taint with key ‘dedicated’ and value ‘special-" +"user’ and effect ‘NoSchedule’.\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified.\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node ‘foo’ the taint with key ‘dedicated’ and effect " +"‘NoSchedule’ if one exists.\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node ‘foo’ all the taints with key ‘dedicated’\n" 
+"\t\tkubectl taint nodes foo dedicated-" + +#: pkg/kubectl/cmd/label.go:77 +msgid "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value.\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +"\t\t# Does not require the --overwrite flag.\n" +"\t\tkubectl label pods foo bar-" +msgstr "" +"\n" +"\t\t# Update pod ‘foo’ with the label ‘unhealthy’ and the value ‘true’.\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod ‘foo’ with the label ‘status’ and the value ‘unhealthy’, " +"overwriting any existing value.\n" +"\t\tkubectl label —overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods —all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in “pod.json”\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod ‘foo’ only if the resource is unchanged from version 1.\n" +"\t\tkubectl label pods foo status=unhealthy —resource-version=1\n" +"\n" +"\t\t# Update pod ‘foo’ by removing a label named ‘bar’ if it exists.\n" +"\t\t# Does not require the —overwrite flag.\n" +"\t\tkubectl label pods foo bar-" + +#: pkg/kubectl/cmd/rollingupdate.go:54 +msgid "" +"\n" +"\t\t# Update pods of frontend-v1 using new replication controller data in " +"frontend-v2.json.\n" +"\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +"\n" +"\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +"\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +"\n" +"\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +"image, and switching the\n" +"\t\t# name of the replication controller.\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +"\n" +"\t\t# Update the pods of frontend by just changing the image, and keeping " +"the old name.\n" +"\t\tkubectl rolling-update frontend --image=image:v2\n" +"\n" +"\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 to " +"frontend-v2).\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +msgstr "" +"\n" +"\t\t# Update pods of frontend-v1 using new replication controller data in " +"frontend-v2.json.\n" +"\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +"\n" +"\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +"\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +"\n" +"\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +"image, and switching the\n" +"\t\t# name of the replication controller.\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 —image=image:v2\n" +"\n" +"\t\t# Update the pods of frontend by just changing the image, and keeping " +"the old name.\n" +"\t\tkubectl rolling-update frontend —image=image:v2\n" +"\n" +"\t\t# Abort and reverse an existing rollout in progress (from 
frontend-v1 to " +"frontend-v2).\n" +"\t\tkubectl rolling-update frontend-v1 frontend-v2 —rollback" + +#: pkg/kubectl/cmd/apply_view_last_applied.go:52 +msgid "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML.\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +msgstr "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML.\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#: pkg/kubectl/cmd/apply.go:75 +msgid "" +"\n" +"\t\tApply a configuration to a resource by filename or stdin.\n" +"\t\tThis resource will be created if it doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." +msgstr "" +"\n" +"\t\tApply a configuration to a resource by filename or stdin.\n" +"\t\tThis resource will be created if it doesn’t exist yet.\n" +"\t\tTo use ‘apply’, always create the resource initially with either ‘apply’ " +"or ‘create —save-config’.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the —prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." + +#: pkg/kubectl/cmd/convert.go:38 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by —output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." + +#: pkg/kubectl/cmd/create_clusterrole.go:31 +msgid "" +"\n" +"\t\tCreate a ClusterRole." +msgstr "" +"\n" +"\t\tCreate a ClusterRole." + +#: pkg/kubectl/cmd/create_clusterrolebinding.go:32 +msgid "" +"\n" +"\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +msgstr "" +"\n" +"\t\tCreate a ClusterRoleBinding for a particular ClusterRole." + +#: pkg/kubectl/cmd/create_rolebinding.go:32 +msgid "" +"\n" +"\t\tCreate a RoleBinding for a particular Role or ClusterRole." +msgstr "" +"\n" +"\t\tCreate a RoleBinding for a particular Role or ClusterRole." 
+
+#: pkg/kubectl/cmd/create_secret.go:200
+msgid ""
+"\n"
+"\t\tCreate a TLS secret from the given public/private key pair.\n"
+"\n"
+"\t\tThe public/private key pair must exist beforehand. The public key "
+"certificate must be .PEM encoded and match the given private key."
+msgstr ""
+"\n"
+"\t\tCreate a TLS secret from the given public/private key pair.\n"
+"\n"
+"\t\tThe public/private key pair must exist beforehand. The public key "
+"certificate must be .PEM encoded and match the given private key."
+
+#: pkg/kubectl/cmd/create_configmap.go:32
+msgid ""
+"\n"
+"\t\tCreate a configmap based on a file, directory, or specified literal "
+"value.\n"
+"\n"
+"\t\tA single configmap may package one or more key/value pairs.\n"
+"\n"
+"\t\tWhen creating a configmap based on a file, the key will default to the "
+"basename of the file, and the value will\n"
+"\t\tdefault to the file content. If the basename is an invalid key, you may "
+"specify an alternate key.\n"
+"\n"
+"\t\tWhen creating a configmap based on a directory, each file whose basename "
+"is a valid key in the directory will be\n"
+"\t\tpackaged into the configmap. Any directory entries except regular files "
+"are ignored (e.g. subdirectories,\n"
+"\t\tsymlinks, devices, pipes, etc)."
+msgstr ""
+"\n"
+"\t\tCreate a configmap based on a file, directory, or specified literal "
+"value.\n"
+"\n"
+"\t\tA single configmap may package one or more key/value pairs.\n"
+"\n"
+"\t\tWhen creating a configmap based on a file, the key will default to the "
+"basename of the file, and the value will\n"
+"\t\tdefault to the file content. If the basename is an invalid key, you may "
+"specify an alternate key.\n"
+"\n"
+"\t\tWhen creating a configmap based on a directory, each file whose basename "
+"is a valid key in the directory will be\n"
+"\t\tpackaged into the configmap. Any directory entries except regular files "
+"are ignored (e.g. subdirectories,\n"
+"\t\tsymlinks, devices, pipes, etc)."
+
+#: pkg/kubectl/cmd/create_namespace.go:32
+msgid ""
+"\n"
+"\t\tCreate a namespace with the specified name."
+msgstr ""
+"\n"
+"\t\tCreate a namespace with the specified name."
+
+#: pkg/kubectl/cmd/create_secret.go:119
+msgid ""
+"\n"
+"\t\tCreate a new secret for use with Docker registries.\n"
+"\n"
+"\t\tDockercfg secrets are used to authenticate against Docker registries.\n"
+"\n"
+"\t\tWhen using the Docker command line to push images, you can authenticate "
+"to a given registry by running\n"
+"\n"
+"\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --"
+"password=DOCKER_PASSWORD --email=DOCKER_EMAIL.\n"
+"\n"
+" That produces a ~/.dockercfg file that is used by subsequent 'docker "
+"push' and 'docker pull' commands to\n"
+"\t\tauthenticate to the registry. The email address is optional.\n"
+"\n"
+"\t\tWhen creating applications, you may have a Docker registry that requires "
+"authentication. In order for the\n"
+"\t\tnodes to pull images on your behalf, they have to have the credentials. "
+"You can provide this information\n"
+"\t\tby creating a dockercfg secret and attaching it to your service account."
+msgstr ""
+"\n"
+"\t\tCreate a new secret for use with Docker registries.\n"
+"\n"
+"\t\tDockercfg secrets are used to authenticate against Docker registries.\n"
+"\n"
+"\t\tWhen using the Docker command line to push images, you can authenticate "
+"to a given registry by running\n"
+"\n"
+"\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --"
+"password=DOCKER_PASSWORD --email=DOCKER_EMAIL.\n"
+"\n"
+" That produces a ~/.dockercfg file that is used by subsequent 'docker "
+"push' and 'docker pull' commands to\n"
+"\t\tauthenticate to the registry. The email address is optional.\n"
+"\n"
+"\t\tWhen creating applications, you may have a Docker registry that requires "
+"authentication. In order for the\n"
+"\t\tnodes to pull images on your behalf, they have to have the credentials. "
+"You can provide this information\n"
+"\t\tby creating a dockercfg secret and attaching it to your service account."
+
+#: pkg/kubectl/cmd/create_pdb.go:32
+msgid ""
+"\n"
+"\t\tCreate a pod disruption budget with the specified name, selector, and "
+"desired minimum available pods"
+msgstr ""
+"\n"
+"\t\tCreate a pod disruption budget with the specified name, selector, and "
+"desired minimum available pods"
+
+#: pkg/kubectl/cmd/create.go:42
+msgid ""
+"\n"
+"\t\tCreate a resource by filename or stdin.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted."
+msgstr ""
+"\n"
+"\t\tCreate a resource by filename or stdin.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted."
+
+#: pkg/kubectl/cmd/create_quota.go:32
+msgid ""
+"\n"
+"\t\tCreate a resourcequota with the specified name, hard limits and optional "
+"scopes"
+msgstr ""
+"\n"
+"\t\tCreate a resourcequota with the specified name, hard limits and optional "
+"scopes"
+
+#: pkg/kubectl/cmd/create_role.go:38
+msgid ""
+"\n"
+"\t\tCreate a role with a single rule."
+msgstr ""
+"\n"
+"\t\tCreate a role with a single rule."
+
+#: pkg/kubectl/cmd/create_secret.go:47
+msgid ""
+"\n"
+"\t\tCreate a secret based on a file, directory, or specified literal value.\n"
+"\n"
+"\t\tA single secret may package one or more key/value pairs.\n"
+"\n"
+"\t\tWhen creating a secret based on a file, the key will default to the "
+"basename of the file, and the value will\n"
+"\t\tdefault to the file content. If the basename is an invalid key, you may "
+"specify an alternate key.\n"
+"\n"
+"\t\tWhen creating a secret based on a directory, each file whose basename is "
+"a valid key in the directory will be\n"
+"\t\tpackaged into the secret. Any directory entries except regular files "
+"are ignored (e.g. subdirectories,\n"
+"\t\tsymlinks, devices, pipes, etc)."
+msgstr ""
+"\n"
+"\t\tCreate a secret based on a file, directory, or specified literal value.\n"
+"\n"
+"\t\tA single secret may package one or more key/value pairs.\n"
+"\n"
+"\t\tWhen creating a secret based on a file, the key will default to the "
+"basename of the file, and the value will\n"
+"\t\tdefault to the file content. If the basename is an invalid key, you may "
+"specify an alternate key.\n"
+"\n"
+"\t\tWhen creating a secret based on a directory, each file whose basename is "
+"a valid key in the directory will be\n"
+"\t\tpackaged into the secret. Any directory entries except regular files "
+"are ignored (e.g. subdirectories,\n"
+"\t\tsymlinks, devices, pipes, etc)."
+
+#: pkg/kubectl/cmd/create_serviceaccount.go:32
+msgid ""
+"\n"
+"\t\tCreate a service account with the specified name."
+msgstr ""
+"\n"
+"\t\tCreate a service account with the specified name."
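
The dockercfg description above stops short of the kubectl side of the workflow. A hedged sketch of the secret-plus-service-account wiring it describes (the registry placeholders and the my-registry name are illustrative):

    # Store the registry credential as a docker-registry secret.
    kubectl create secret docker-registry my-registry \
        --docker-server=DOCKER_REGISTRY_SERVER \
        --docker-username=DOCKER_USER \
        --docker-password=DOCKER_PASSWORD \
        --docker-email=DOCKER_EMAIL
    # Attach it to a service account so nodes can pull images on your behalf.
    kubectl patch serviceaccount default \
        -p '{"imagePullSecrets": [{"name": "my-registry"}]}'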
+
+#: pkg/kubectl/cmd/run.go:52
+msgid ""
+"\n"
+"\t\tCreate and run a particular image, possibly replicated.\n"
+"\n"
+"\t\tCreates a deployment or job to manage the created container(s)."
+msgstr ""
+"\n"
+"\t\tCreate and run a particular image, possibly replicated.\n"
+"\n"
+"\t\tCreates a deployment or job to manage the created container(s)."
+
+#: pkg/kubectl/cmd/autoscale.go:34
+msgid ""
+"\n"
+"\t\tCreates an autoscaler that automatically chooses and sets the number of "
+"pods that run in a kubernetes cluster.\n"
+"\n"
+"\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name and "
+"creates an autoscaler that uses the given resource as a reference.\n"
+"\t\tAn autoscaler can automatically increase or decrease number of pods "
+"deployed within the system as needed."
+msgstr ""
+"\n"
+"\t\tCreates an autoscaler that automatically chooses and sets the number of "
+"pods that run in a kubernetes cluster.\n"
+"\n"
+"\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name and "
+"creates an autoscaler that uses the given resource as a reference.\n"
+"\t\tAn autoscaler can automatically increase or decrease number of pods "
+"deployed within the system as needed."
+
+#: pkg/kubectl/cmd/delete.go:40
+msgid ""
+"\n"
+"\t\tDelete resources by filenames, stdin, resources and names, or by "
+"resources and label selector.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted. Only one type of the arguments may "
+"be specified: filenames,\n"
+"\t\tresources and names, or resources and label selector.\n"
+"\n"
+"\t\tSome resources, such as pods, support graceful deletion. These resources "
+"define a default period\n"
+"\t\tbefore they are forcibly terminated (the grace period) but you may "
+"override that value with\n"
+"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. "
+"Because these resources often\n"
+"\t\trepresent entities in the cluster, deletion may not be acknowledged "
+"immediately. If the node\n"
+"\t\thosting a pod is down or cannot reach the API server, termination may "
+"take significantly longer\n"
+"\t\tthan the grace period. To force delete a resource,\tyou must pass a grace"
+"\tperiod of 0 and specify\n"
+"\t\tthe --force flag.\n"
+"\n"
+"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the "
+"pod's processes have been\n"
+"\t\tterminated, which can leave those processes running until the node "
+"detects the deletion and\n"
+"\t\tcompletes graceful deletion. If your processes use shared storage or "
+"talk to a remote API and\n"
+"\t\tdepend on the name of the pod to identify themselves, force deleting "
+"those pods may result in\n"
+"\t\tmultiple processes running on different machines using the same "
+"identification which may lead\n"
+"\t\tto data corruption or inconsistency. Only force delete pods when you are "
+"sure the pod is\n"
+"\t\tterminated, or if your application can tolerate multiple copies of the "
+"same pod running at once.\n"
+"\t\tAlso, if you force delete pods the scheduler may place new pods on those "
+"nodes before the node\n"
+"\t\thas released those resources, causing those pods to be evicted "
+"immediately.\n"
+"\n"
+"\t\tNote that the delete command does NOT do resource version checks, so if "
+"someone\n"
+"\t\tsubmits an update to a resource right when you submit a delete, their "
+"update\n"
+"\t\twill be lost along with the rest of the resource."
+msgstr ""
+"\n"
+"\t\tDelete resources by filenames, stdin, resources and names, or by "
+"resources and label selector.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted. Only one type of the arguments may "
+"be specified: filenames,\n"
+"\t\tresources and names, or resources and label selector.\n"
+"\n"
+"\t\tSome resources, such as pods, support graceful deletion. These resources "
+"define a default period\n"
+"\t\tbefore they are forcibly terminated (the grace period) but you may "
+"override that value with\n"
+"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. "
+"Because these resources often\n"
+"\t\trepresent entities in the cluster, deletion may not be acknowledged "
+"immediately. If the node\n"
+"\t\thosting a pod is down or cannot reach the API server, termination may "
+"take significantly longer\n"
+"\t\tthan the grace period. To force delete a resource,\tyou must pass a grace"
+"\tperiod of 0 and specify\n"
+"\t\tthe --force flag.\n"
+"\n"
+"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the "
+"pod's processes have been\n"
+"\t\tterminated, which can leave those processes running until the node "
+"detects the deletion and\n"
+"\t\tcompletes graceful deletion. If your processes use shared storage or "
+"talk to a remote API and\n"
+"\t\tdepend on the name of the pod to identify themselves, force deleting "
+"those pods may result in\n"
+"\t\tmultiple processes running on different machines using the same "
+"identification which may lead\n"
+"\t\tto data corruption or inconsistency. Only force delete pods when you are "
+"sure the pod is\n"
+"\t\tterminated, or if your application can tolerate multiple copies of the "
+"same pod running at once.\n"
+"\t\tAlso, if you force delete pods the scheduler may place new pods on those "
+"nodes before the node\n"
+"\t\thas released those resources, causing those pods to be evicted "
+"immediately.\n"
+"\n"
+"\t\tNote that the delete command does NOT do resource version checks, so if "
+"someone\n"
+"\t\tsubmits an update to a resource right when you submit a delete, their "
+"update\n"
+"\t\twill be lost along with the rest of the resource."
+
+#: pkg/kubectl/cmd/stop.go:31
+msgid ""
+"\n"
+"\t\tDeprecated: Gracefully shut down a resource by name or filename.\n"
+"\n"
+"\t\tThe stop command is deprecated; all its functionality is covered by the "
+"delete command.\n"
+"\t\tSee 'kubectl delete --help' for more details.\n"
+"\n"
+"\t\tAttempts to shut down and delete a resource that supports graceful "
+"termination.\n"
+"\t\tIf the resource is scalable it will be scaled to 0 before deletion."
+msgstr ""
+"\n"
+"\t\tDeprecated: Gracefully shut down a resource by name or filename.\n"
+"\n"
+"\t\tThe stop command is deprecated; all its functionality is covered by the "
+"delete command.\n"
+"\t\tSee 'kubectl delete --help' for more details.\n"
+"\n"
+"\t\tAttempts to shut down and delete a resource that supports graceful "
+"termination.\n"
+"\t\tIf the resource is scalable it will be scaled to 0 before deletion."
+
+#: pkg/kubectl/cmd/top_node.go:60
+msgid ""
+"\n"
+"\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n"
+"\n"
+"\t\tThe top-node command allows you to see the resource consumption of nodes."
+msgstr ""
+"\n"
+"\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n"
+"\n"
+"\t\tThe top-node command allows you to see the resource consumption of nodes."
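
The graceful-deletion semantics described above can be read as a ladder of increasingly forceful commands; a short sketch (pod name hypothetical):

    kubectl delete pod web-0                           # waits out the pod's default grace period
    kubectl delete pod web-0 --now                     # sets a grace period of 1
    kubectl delete pod web-0 --grace-period=0 --force  # returns without confirming the processes died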
+ +#: pkg/kubectl/cmd/top_pod.go:53 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +"\n" +"\t\tThe ‘top pod’ command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." + +#: pkg/kubectl/cmd/top.go:33 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Heapster to be correctly configured and working on " +"the server. " +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Heapster to be correctly configured and working on " +"the server. " + +#: pkg/kubectl/cmd/drain.go:140 +msgid "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the APIServer supports eviction\n" +"\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use " +"normal DELETE\n" +"\t\tto delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tDaemonSet-managed pods, because those pods would be immediately replaced " +"by the\n" +"\t\tDaemonSet controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by ReplicationController,\n" +"\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +"any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +msgstr "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the APIServer supports eviction\n" +"\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use " +"normal DELETE\n" +"\t\tto delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tDaemonSet-managed pods, because those pods would be immediately replaced " +"by the\n" +"\t\tDaemonSet controller, which ignores unschedulable markings. 
If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by ReplicationController,\n" +"\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +"any pods unless you\n" +"\t\tuse —force. —force will also allow deletion to proceed if the managing " +"resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t’drain’ waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#: pkg/kubectl/cmd/edit.go:56 +msgid "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts filenames as well as command line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts filenames as well as command line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify “-o json”.\n" +"\n" +"\t\tThe flag —windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. 
When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: pkg/kubectl/cmd/drain.go:115 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMark node as schedulable." + +#: pkg/kubectl/cmd/drain.go:90 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMark node as unschedulable." + +#: pkg/kubectl/cmd/completion.go:47 +msgid "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tNote: this requires the bash-completion framework, which is not " +"installed\n" +"\t\tby default on Mac. This can be installed by using homebrew:\n" +"\n" +"\t\t $ brew install bash-completion\n" +"\n" +"\t\tOnce installed, bash_completion must be evaluated. This can be done by " +"adding the\n" +"\t\tfollowing line to the .bash_profile\n" +"\n" +"\t\t $ source $(brew --prefix)/etc/bash_completion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2" +msgstr "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tNote: this requires the bash-completion framework, which is not " +"installed\n" +"\t\tby default on Mac. This can be installed by using homebrew:\n" +"\n" +"\t\t $ brew install bash-completion\n" +"\n" +"\t\tOnce installed, bash_completion must be evaluated. This can be done by " +"adding the\n" +"\t\tfollowing line to the .bash_profile\n" +"\n" +"\t\t $ source $(brew —prefix)/etc/bash_completion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2" + +#: pkg/kubectl/cmd/rollingupdate.go:45 +msgid "" +"\n" +"\t\tPerform a rolling update of the given ReplicationController.\n" +"\n" +"\t\tReplaces the specified replication controller with a new replication " +"controller by updating one pod at a time to use the\n" +"\t\tnew PodTemplate. The new-controller.json must specify the same namespace " +"as the\n" +"\t\texisting replication controller and overwrite at least one (common) " +"label in its replicaSelector.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" +msgstr "" +"\n" +"\t\tPerform a rolling update of the given ReplicationController.\n" +"\n" +"\t\tReplaces the specified replication controller with a new replication " +"controller by updating one pod at a time to use the\n" +"\t\tnew PodTemplate. The new-controller.json must specify the same namespace " +"as the\n" +"\t\texisting replication controller and overwrite at least one (common) " +"label in its replicaSelector.\n" +"\n" +"\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" + +#: pkg/kubectl/cmd/replace.go:40 +msgid "" +"\n" +"\t\tReplace a resource by filename or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +"the\n" +"\t\tcomplete resource spec must be provided. 
+"\n"
+"\t\t $ kubectl get TYPE NAME -o yaml\n"
+"\n"
+"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://"
+"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions."
+"html to find if a field is mutable."
+msgstr ""
+"\n"
+"\t\tReplace a resource by filename or stdin.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted. If replacing an existing resource, "
+"the\n"
+"\t\tcomplete resource spec must be provided. This can be obtained by\n"
+"\n"
+"\t\t $ kubectl get TYPE NAME -o yaml\n"
+"\n"
+"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://"
+"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions."
+"html to find if a field is mutable."
+
+#: pkg/kubectl/cmd/scale.go:34
+msgid ""
+"\n"
+"\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, or "
+"Job.\n"
+"\n"
+"\t\tScale also allows users to specify one or more preconditions for the "
+"scale action.\n"
+"\n"
+"\t\tIf --current-replicas or --resource-version is specified, it is "
+"validated before the\n"
+"\t\tscale is attempted, and it is guaranteed that the precondition holds "
+"true when the\n"
+"\t\tscale is sent to the server."
+msgstr ""
+"\n"
+"\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, or "
+"Job.\n"
+"\n"
+"\t\tScale also allows users to specify one or more preconditions for the "
+"scale action.\n"
+"\n"
+"\t\tIf --current-replicas or --resource-version is specified, it is validated "
+"before the\n"
+"\t\tscale is attempted, and it is guaranteed that the precondition holds "
+"true when the\n"
+"\t\tscale is sent to the server."
+
+#: pkg/kubectl/cmd/apply_set_last_applied.go:62
+msgid ""
+"\n"
+"\t\tSet the latest last-applied-configuration annotations by setting it to "
+"match the contents of a file.\n"
+"\t\tThis results in the last-applied-configuration being updated as though "
+"'kubectl apply -f <file>' was run,\n"
+"\t\twithout updating any other parts of the object."
+msgstr ""
+"\n"
+"\t\tSet the latest last-applied-configuration annotations by setting it to "
+"match the contents of a file.\n"
+"\t\tThis results in the last-applied-configuration being updated as though "
+"'kubectl apply -f <file>' was run,\n"
+"\t\twithout updating any other parts of the object."
+
+#: pkg/kubectl/cmd/proxy.go:36
+msgid ""
+"\n"
+"\t\tTo proxy all of the kubernetes api and nothing else, use:\n"
+"\n"
+"\t\t $ kubectl proxy --api-prefix=/\n"
+"\n"
+"\t\tTo proxy only part of the kubernetes api and also some static files:\n"
+"\n"
+"\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/"
+"api/\n"
+"\n"
+"\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n"
+"\n"
+"\t\tTo proxy the entire kubernetes api at a different root, use:\n"
+"\n"
+"\t\t $ kubectl proxy --api-prefix=/custom/\n"
+"\n"
+"\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'"
+msgstr ""
+"\n"
+"\t\tTo proxy all of the kubernetes api and nothing else, use:\n"
+"\n"
+"\t\t $ kubectl proxy --api-prefix=/\n"
+"\n"
+"\t\tTo proxy only part of the kubernetes api and also some static files:\n"
+"\n"
+"\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/"
+"api/\n"
+"\n"
+"\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n"
+"\n"
+"\t\tTo proxy the entire kubernetes api at a different root, use:\n"
+"\n"
+"\t\t $ kubectl proxy --api-prefix=/custom/\n"
+"\n"
+"\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'"
+
+#: pkg/kubectl/cmd/patch.go:59
+msgid ""
+"\n"
+"\t\tUpdate field(s) of a resource using strategic merge patch\n"
+"\n"
+"\t\tJSON and YAML formats are accepted.\n"
+"\n"
+"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://"
+"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions."
+"html to find if a field is mutable."
+msgstr ""
+"\n"
+"\t\tUpdate field(s) of a resource using strategic merge patch\n"
+"\n"
+"\t\tJSON and YAML formats are accepted.\n"
+"\n"
+"\t\tPlease refer to the models in https://htmlpreview.github.io/?https://"
+"github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions."
+"html to find if a field is mutable."
+
+#: pkg/kubectl/cmd/label.go:70
+#, c-format
+msgid ""
+"\n"
+"\t\tUpdate the labels on a resource.\n"
+"\n"
+"\t\t* A label must begin with a letter or number, and may contain letters, "
+"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n"
+"\t\t* If --overwrite is true, then existing labels can be overwritten, "
+"otherwise attempting to overwrite a label will result in an error.\n"
+"\t\t* If --resource-version is specified, then updates will use this "
+"resource version, otherwise the existing resource-version will be used."
+msgstr ""
+"\n"
+"\t\tUpdate the labels on a resource.\n"
+"\n"
+"\t\t* A label must begin with a letter or number, and may contain letters, "
+"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n"
+"\t\t* If --overwrite is true, then existing labels can be overwritten, "
+"otherwise attempting to overwrite a label will result in an error.\n"
+"\t\t* If --resource-version is specified, then updates will use this resource "
+"version, otherwise the existing resource-version will be used."
+
+#: pkg/kubectl/cmd/taint.go:58
+#, c-format
+msgid ""
+"\n"
+"\t\tUpdate the taints on one or more nodes.\n"
+"\n"
+"\t\t* A taint consists of a key, value, and effect. As an argument here, it "
As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* The value must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." +msgstr "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* The value must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." + +#: pkg/kubectl/cmd/apply_view_last_applied.go:46 +msgid "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change output format." +msgstr "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change output format." + +#: pkg/kubectl/cmd/cp.go:37 +msgid "" +"\n" +"\t # !!!Important Note!!!\n" +"\t # Requires that the 'tar' binary is present in your container\n" +"\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +"\n" +"\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in " +"the default namespace\n" +"\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +"\n" +" # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific " +"container\n" +"\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"\n" +"\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl cp /:/tmp/foo /tmp/bar" +msgstr "" +"\n" +"\t # !!!Important Note!!!\n" +"\t # Requires that the ‘tar’ binary is present in your container\n" +"\t # image. 
+"\n"
+"\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+" # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+
+#: pkg/kubectl/cmd/create_secret.go:205
+msgid ""
+"\n"
+"\t # Create a new TLS secret named tls-secret with the given key pair:\n"
+"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/"
+"to/tls.key"
+msgstr ""
+"\n"
+"\t # Create a new TLS secret named tls-secret with the given key pair:\n"
+"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/"
+"tls.key"
+
+#: pkg/kubectl/cmd/create_namespace.go:35
+msgid ""
+"\n"
+"\t # Create a new namespace named my-namespace\n"
+"\t kubectl create namespace my-namespace"
+msgstr ""
+"\n"
+"\t # Create a new namespace named my-namespace\n"
+"\t kubectl create namespace my-namespace"
+
+#: pkg/kubectl/cmd/create_secret.go:59
+msgid ""
+"\n"
+"\t # Create a new secret named my-secret with keys for each file in folder "
+"bar\n"
+"\t kubectl create secret generic my-secret --from-file=path/to/bar\n"
+"\n"
+"\t # Create a new secret named my-secret with specified keys instead of "
+"names on disk\n"
+"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/."
+"ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n"
+"\n"
+"\t # Create a new secret named my-secret with key1=supersecret and "
+"key2=topsecret\n"
+"\t kubectl create secret generic my-secret --from-literal=key1=supersecret "
+"--from-literal=key2=topsecret"
+msgstr ""
+"\n"
+"\t # Create a new secret named my-secret with keys for each file in folder "
+"bar\n"
+"\t kubectl create secret generic my-secret --from-file=path/to/bar\n"
+"\n"
+"\t # Create a new secret named my-secret with specified keys instead of "
+"names on disk\n"
+"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/"
+"id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n"
+"\n"
+"\t # Create a new secret named my-secret with key1=supersecret and "
+"key2=topsecret\n"
+"\t kubectl create secret generic my-secret --from-literal=key1=supersecret --"
+"from-literal=key2=topsecret"
+
+#: pkg/kubectl/cmd/create_serviceaccount.go:35
+msgid ""
+"\n"
+"\t # Create a new service account named my-service-account\n"
+"\t kubectl create serviceaccount my-service-account"
+msgstr ""
+"\n"
+"\t # Create a new service account named my-service-account\n"
+"\t kubectl create serviceaccount my-service-account"
+
+#: pkg/kubectl/cmd/create_service.go:232
+msgid ""
+"\n"
+"\t# Create a new ExternalName service named my-ns \n"
+"\tkubectl create service externalname my-ns --external-name bar.com"
+msgstr ""
+"\n"
+"\t# Create a new ExternalName service named my-ns \n"
+"\tkubectl create service externalname my-ns --external-name bar.com"
+
+#: pkg/kubectl/cmd/create_service.go:225
+msgid ""
+"\n"
+"\tCreate an ExternalName service with the specified name.\n"
+"\n"
+"\tExternalName service references to an external DNS address instead of\n"
+"\tonly pods, which will allow application authors to reference services\n"
+"\tthat exist off platform, on other clusters, or locally."
+msgstr "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." + +#: pkg/kubectl/cmd/help.go:30 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." + +#: pkg/kubectl/cmd/create_service.go:173 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs —tcp=5678:8080" + +#: pkg/kubectl/cmd/create_service.go:53 +msgid "" +"\n" +" # Create a new clusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new clusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" +"\n" +" # Create a new clusterIP service named my-cs\n" +" kubectl create service clusterip my-cs —tcp=5678:8080\n" +"\n" +" # Create a new clusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs —clusterip=“None”" + +#: pkg/kubectl/cmd/create_deployment.go:36 +msgid "" +"\n" +" # Create a new deployment named my-dep that runs the busybox image.\n" +" kubectl create deployment my-dep --image=busybox" +msgstr "" +"\n" +" # Create a new deployment named my-dep that runs the busybox image.\n" +" kubectl create deployment my-dep —image=busybox" + +#: pkg/kubectl/cmd/create_service.go:116 +msgid "" +"\n" +" # Create a new nodeport service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new nodeport service named my-ns\n" +" kubectl create service nodeport my-ns —tcp=5678:8080" + +#: pkg/kubectl/cmd/clusterinfo_dump.go:62 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump —output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump —all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump —namespaces default,kube-system —output-" +"directory=/path/to/cluster-state" + +#: pkg/kubectl/cmd/annotate.go:78 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'.\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" 
+"\n"
+" # Update pod 'foo' with the annotation 'description' and the value 'my "
+"frontend running nginx', overwriting any existing value.\n"
+" kubectl annotate --overwrite pods foo description='my frontend running "
+"nginx'\n"
+"\n"
+" # Update all pods in the namespace\n"
+" kubectl annotate pods --all description='my frontend running nginx'\n"
+"\n"
+" # Update pod 'foo' only if the resource is unchanged from version 1.\n"
+" kubectl annotate pods foo description='my frontend running nginx' --"
+"resource-version=1\n"
+"\n"
+" # Update pod 'foo' by removing an annotation named 'description' if it "
+"exists.\n"
+" # Does not require the --overwrite flag.\n"
+" kubectl annotate pods foo description-"
+msgstr ""
+"\n"
+" # Update pod 'foo' with the annotation 'description' and the value 'my "
+"frontend'.\n"
+" # If the same annotation is set multiple times, only the last value will "
+"be applied\n"
+" kubectl annotate pods foo description='my frontend'\n"
+"\n"
+" # Update a pod identified by type and name in \"pod.json\"\n"
+" kubectl annotate -f pod.json description='my frontend'\n"
+"\n"
+" # Update pod 'foo' with the annotation 'description' and the value 'my "
+"frontend running nginx', overwriting any existing value.\n"
+" kubectl annotate --overwrite pods foo description='my frontend running "
+"nginx'\n"
+"\n"
+" # Update all pods in the namespace\n"
+" kubectl annotate pods --all description='my frontend running nginx'\n"
+"\n"
+" # Update pod 'foo' only if the resource is unchanged from version 1.\n"
+" kubectl annotate pods foo description='my frontend running nginx' --"
+"resource-version=1\n"
+"\n"
+" # Update pod 'foo' by removing an annotation named 'description' if it "
+"exists.\n"
+" # Does not require the --overwrite flag.\n"
+" kubectl annotate pods foo description-"
+
+#: pkg/kubectl/cmd/create_service.go:170
+msgid ""
+"\n"
+" # Create a new LoadBalancer service named my-lbs\n"
+" Create a LoadBalancer service with the specified name."
+msgstr ""
+"\n"
+" Create a LoadBalancer service with the specified name."
+
+#: pkg/kubectl/cmd/create_service.go:50
+msgid ""
+"\n"
+" Create a clusterIP service with the specified name."
+msgstr ""
+"\n"
+" Create a clusterIP service with the specified name."
+
+#: pkg/kubectl/cmd/create_deployment.go:33
+msgid ""
+"\n"
+" Create a deployment with the specified name."
+msgstr ""
+"\n"
+" Create a deployment with the specified name."
+
+#: pkg/kubectl/cmd/create_service.go:113
+msgid ""
+"\n"
+" Create a nodeport service with the specified name."
+msgstr ""
+"\n"
+" Create a nodeport service with the specified name."
+
+#: pkg/kubectl/cmd/clusterinfo_dump.go:53
+msgid ""
+"\n"
+" Dumps cluster info out suitable for debugging and diagnosing cluster "
+"problems. By default, dumps everything to\n"
+" stdout. You can optionally specify a directory with --output-directory. "
+"If you specify a directory, kubernetes will\n"
+" build a set of files in that directory. By default only dumps things in "
+"the 'kube-system' namespace, but you can\n"
+" switch to a different namespace with the --namespaces flag, or specify --"
+"all-namespaces to dump all namespaces.\n"
+"\n"
+" The command also dumps the logs of all of the pods in the cluster, these "
+"logs are dumped into different directories\n"
+" based on namespace and pod name."
+msgstr ""
+"\n"
+" Dumps cluster info out suitable for debugging and diagnosing cluster "
+"problems. By default, dumps everything to\n"
+" stdout. You can optionally specify a directory with --output-directory. "
+"If you specify a directory, kubernetes will\n"
+" build a set of files in that directory. By default only dumps things in "
+"the 'kube-system' namespace, but you can\n"
+" switch to a different namespace with the --namespaces flag, or specify --"
+"all-namespaces to dump all namespaces.\n"
+"\n"
+" The command also dumps the logs of all of the pods in the cluster, these "
+"logs are dumped into different directories\n"
+" based on namespace and pod name."
+
+#: pkg/kubectl/cmd/clusterinfo.go:37
+msgid ""
+"\n"
+" Display addresses of the master and services with label kubernetes.io/"
+"cluster-service=true\n"
+" To further debug and diagnose cluster problems, use 'kubectl cluster-info "
+"dump'."
+msgstr ""
+"\n"
+" Display addresses of the master and services with label kubernetes.io/"
+"cluster-service=true\n"
+" To further debug and diagnose cluster problems, use 'kubectl cluster-info "
+"dump'."
+
+#: pkg/kubectl/cmd/create_quota.go:62
+msgid ""
+"A comma-delimited set of quota scopes that must all match each object "
+"tracked by the quota."
+msgstr ""
+"A comma-delimited set of quota scopes that must all match each object "
+"tracked by the quota."
+
+#: pkg/kubectl/cmd/create_quota.go:61
+msgid ""
+"A comma-delimited set of resource=quantity pairs that define a hard limit."
+msgstr ""
+"A comma-delimited set of resource=quantity pairs that define a hard limit."
+
+#: pkg/kubectl/cmd/create_pdb.go:64
+msgid ""
+"A label selector to use for this budget. Only equality-based selector "
+"requirements are supported."
+msgstr ""
+"A label selector to use for this budget. Only equality-based selector "
+"requirements are supported."
+
+#: pkg/kubectl/cmd/expose.go:104
+msgid ""
+"A label selector to use for this service. Only equality-based selector "
+"requirements are supported. If empty (the default) infer the selector from "
+"the replication controller or replica set.)"
+msgstr ""
+"A label selector to use for this service. Only equality-based selector "
+"requirements are supported. If empty (the default) infer the selector from "
+"the replication controller or replica set.)"
+
+#: pkg/kubectl/cmd/run.go:139
+msgid "A schedule in the Cron format the job should be run with."
+msgstr "A schedule in the Cron format the job should be run with."
+
+#: pkg/kubectl/cmd/expose.go:109
+msgid ""
+"Additional external IP address (not managed by Kubernetes) to accept for the "
+"service. If this IP is routed to a node, the service can be accessed by this "
+"IP in addition to its generated service IP."
+msgstr ""
+"Additional external IP address (not managed by Kubernetes) to accept for the "
+"service. If this IP is routed to a node, the service can be accessed by this "
+"IP in addition to its generated service IP."
+
+#: pkg/kubectl/cmd/expose.go:110 pkg/kubectl/cmd/run.go:122
+msgid ""
+"An inline JSON override for the generated object. If this is non-empty, it "
+"is used to override the generated object. Requires that the object supply a "
+"valid apiVersion field."
+msgstr ""
+"An inline JSON override for the generated object. If this is non-empty, it "
+"is used to override the generated object. Requires that the object supply a "
+"valid apiVersion field."
+
+#: pkg/kubectl/cmd/run.go:137
+msgid ""
+"An inline JSON override for the generated service object. If this is non-"
+"empty, it is used to override the generated object. Requires that the object "
+"supply a valid apiVersion field. Only used if --expose is true."
+msgstr "" +"An inline JSON override for the generated service object. If this is non-" +"empty, it is used to override the generated object. Requires that the object " +"supply a valid apiVersion field. Only used if —expose is true." # https://github.com/kubernetes/kubernetes/blob/masterpkg/kubectl/cmd/apply.go#L98 +#: pkg/kubectl/cmd/apply.go:104 msgid "Apply a configuration to a resource by filename or stdin" msgstr "ファイル名を指定または標準入力経由でリソースにコンフィグを適用する" +#: pkg/kubectl/cmd/certificates.go:72 +msgid "Approve a certificate signing request" +msgstr "Approve a certificate signing request" + +#: pkg/kubectl/cmd/create_service.go:82 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assign your own ClusterIP or set to ‘None’ for a ‘headless’ service (no " +"loadbalancing)." + +#: pkg/kubectl/cmd/attach.go:70 +msgid "Attach to a running container" +msgstr "Attach to a running container" + +#: pkg/kubectl/cmd/autoscale.go:56 +msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +msgstr "Auto-scale a Deployment, ReplicaSet, or ReplicationController" + +#: pkg/kubectl/cmd/expose.go:113 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to ‘None’ to create a headless service." + +#: pkg/kubectl/cmd/create_clusterrolebinding.go:56 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole this ClusterRoleBinding should reference" + +#: pkg/kubectl/cmd/create_rolebinding.go:56 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole this RoleBinding should reference" + +#: pkg/kubectl/cmd/rollingupdate.go:102 +msgid "" +"Container name which will have its image upgraded. Only relevant when --" +"image is specified, ignored otherwise. Required when using --image on a " +"multi-container pod" +msgstr "" +"Container name which will have its image upgraded. Only relevant when —image " +"is specified, ignored otherwise. Required when using —image on a multi-" +"container pod" + +#: pkg/kubectl/cmd/convert.go:68 +msgid "Convert config files between different API versions" +msgstr "Convert config files between different API versions" + +#: pkg/kubectl/cmd/cp.go:65 +msgid "Copy files and directories to and from containers." +msgstr "Copy files and directories to and from containers." + +#: pkg/kubectl/cmd/create_clusterrolebinding.go:44 +msgid "Create a ClusterRoleBinding for a particular ClusterRole" +msgstr "Create a ClusterRoleBinding for a particular ClusterRole" + +#: pkg/kubectl/cmd/create_service.go:182 +msgid "Create a LoadBalancer service." +msgstr "Create a LoadBalancer service." + +#: pkg/kubectl/cmd/create_service.go:125 +msgid "Create a NodePort service." +msgstr "Create a NodePort service." + +#: pkg/kubectl/cmd/create_rolebinding.go:44 +msgid "Create a RoleBinding for a particular Role or ClusterRole" +msgstr "Create a RoleBinding for a particular Role or ClusterRole" + +#: pkg/kubectl/cmd/create_secret.go:214 +msgid "Create a TLS secret" +msgstr "Create a TLS secret" + +#: pkg/kubectl/cmd/create_service.go:69 +msgid "Create a clusterIP service." +msgstr "Create a clusterIP service." 
+
+#: pkg/kubectl/cmd/create_configmap.go:60
+msgid "Create a configmap from a local file, directory or literal value"
+msgstr "Create a configmap from a local file, directory or literal value"
+
+#: pkg/kubectl/cmd/create_deployment.go:46
+msgid "Create a deployment with the specified name."
+msgstr "Create a deployment with the specified name."
+
+#: pkg/kubectl/cmd/create_namespace.go:45
+msgid "Create a namespace with the specified name"
+msgstr "Create a namespace with the specified name"
+
+#: pkg/kubectl/cmd/create_pdb.go:50
+msgid "Create a pod disruption budget with the specified name."
+msgstr "Create a pod disruption budget with the specified name."
+
+#: pkg/kubectl/cmd/create_quota.go:48
+msgid "Create a quota with the specified name."
+msgstr "Create a quota with the specified name."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go#L98
+#: pkg/kubectl/cmd/create.go:63
+msgid "Create a resource by filename or stdin"
+msgstr "ファイル名を指定または標準入力経由でリソースを作成する"
+
+#: pkg/kubectl/cmd/create_secret.go:144
+msgid "Create a secret for use with a Docker registry"
+msgstr "Create a secret for use with a Docker registry"
+
+#: pkg/kubectl/cmd/create_secret.go:74
+msgid "Create a secret from a local file, directory or literal value"
+msgstr "Create a secret from a local file, directory or literal value"
+
+#: pkg/kubectl/cmd/create_secret.go:35
+msgid "Create a secret using specified subcommand"
+msgstr "Create a secret using specified subcommand"
+
+#: pkg/kubectl/cmd/create_serviceaccount.go:45
+msgid "Create a service account with the specified name"
+msgstr "Create a service account with the specified name"
+
+#: pkg/kubectl/cmd/create_service.go:37
+msgid "Create a service using specified subcommand."
+msgstr "Create a service using specified subcommand."
+
+#: pkg/kubectl/cmd/create_service.go:241
+msgid "Create an ExternalName service."
+msgstr "Create an ExternalName service."
+
+#: pkg/kubectl/cmd/delete.go:132
+msgid ""
+"Delete resources by filenames, stdin, resources and names, or by resources "
+"and label selector"
+msgstr ""
+"Delete resources by filenames, stdin, resources and names, or by resources "
+"and label selector"
+
 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38
+#: pkg/kubectl/cmd/config/delete_cluster.go:39
 msgid "Delete the specified cluster from the kubeconfig"
 msgstr "kubeconfigから指定したクラスターを削除する"

 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38
+#: pkg/kubectl/cmd/config/delete_context.go:39
 msgid "Delete the specified context from the kubeconfig"
 msgstr "kubeconfigから指定したコンテキストを削除する"

+#: pkg/kubectl/cmd/certificates.go:122
+msgid "Deny a certificate signing request"
+msgstr "Deny a certificate signing request"
+
+#: pkg/kubectl/cmd/stop.go:59
+msgid "Deprecated: Gracefully shut down a resource by name or filename"
+msgstr "Deprecated: Gracefully shut down a resource by name or filename"
+
 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62
+#: pkg/kubectl/cmd/config/get_contexts.go:64
 msgid "Describe one or many contexts"
 msgstr "1つまたは複数のコンテキストを記述する"

+#: pkg/kubectl/cmd/top_node.go:78
+msgid "Display Resource (CPU/Memory) usage of nodes"
+msgstr "Display Resource (CPU/Memory) usage of nodes"
+
+#: pkg/kubectl/cmd/top_pod.go:80
+msgid "Display Resource (CPU/Memory) usage of pods"
+msgstr "Display Resource (CPU/Memory) usage of pods"
+
+#: pkg/kubectl/cmd/top.go:44
+msgid "Display Resource (CPU/Memory) usage."
+msgstr "Display Resource (CPU/Memory) usage."
+
 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40
+#: pkg/kubectl/cmd/clusterinfo.go:51
+#| msgid "Display clusters defined in the kubeconfig"
+msgid "Display cluster info"
+msgstr "クラスターの情報を表示する"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40
+#: pkg/kubectl/cmd/config/get_clusters.go:41
 msgid "Display clusters defined in the kubeconfig"
 msgstr "kubeconfigで定義されたクラスターを表示する"

 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64
+#: pkg/kubectl/cmd/config/view.go:67
 msgid "Display merged kubeconfig settings or a specified kubeconfig file"
-msgstr "マージされたkubeconfigの設定または指定されたkubeconfigファイルを表示する"
+msgstr ""
+"マージされたkubeconfigの設定または指定されたkubeconfigファイルを表示する"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62
+#: pkg/kubectl/cmd/get.go:111
+msgid "Display one or many resources"
+msgstr "1つまたは複数のリソースを表示する"

 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48
+#: pkg/kubectl/cmd/config/current_context.go:49
 msgid "Displays the current-context"
 msgstr "カレントコンテキストを表示する"

+#: pkg/kubectl/cmd/explain.go:51
+msgid "Documentation of resources"
+msgstr "リソースの説明を表示する"
+
+#: pkg/kubectl/cmd/drain.go:178
+msgid "Drain node in preparation for maintenance"
+msgstr "Drain node in preparation for maintenance"
+
+#: pkg/kubectl/cmd/clusterinfo_dump.go:39
+msgid "Dump lots of relevant info for debugging and diagnosis"
+msgstr "Dump lots of relevant info for debugging and diagnosis"
+
+#: pkg/kubectl/cmd/edit.go:110
+msgid "Edit a resource on the server"
+msgstr "Edit a resource on the server"
+
+#: pkg/kubectl/cmd/create_secret.go:160
+msgid "Email for Docker registry"
+msgstr "Email for Docker registry"
+
+#: pkg/kubectl/cmd/exec.go:69
+msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +#: pkg/kubectl/cmd/rollingupdate.go:103 +msgid "" +"Explicit policy for when to pull container images. Required when --image is " +"same as existing image, ignored otherwise." +msgstr "" +"Explicit policy for when to pull container images. Required when —image is " +"same as existing image, ignored otherwise." + +#: pkg/kubectl/cmd/portforward.go:76 +msgid "Forward one or more local ports to a pod" +msgstr "Forward one or more local ports to a pod" + +#: pkg/kubectl/cmd/help.go:37 +msgid "Help about any command" +msgstr "Help about any command" + +#: pkg/kubectl/cmd/expose.go:103 +msgid "" +"IP to assign to the Load Balancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." +msgstr "" +"IP to assign to the Load Balancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." + +#: pkg/kubectl/cmd/expose.go:112 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"If non-empty, set the session affinity for the service to this; legal " +"values: ‘None’, ‘ClientIP’" + +#: pkg/kubectl/cmd/annotate.go:136 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +#: pkg/kubectl/cmd/label.go:134 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +#: pkg/kubectl/cmd/rollingupdate.go:99 +msgid "" +"Image to use for upgrading the replication controller. Must be distinct from " +"the existing image (either new image or new image tag). Can not be used " +"with --filename/-f" +msgstr "" +"Image to use for upgrading the replication controller. Must be distinct from " +"the existing image (either new image or new image tag). Can not be used " +"with —filename/-f" + +#: pkg/kubectl/cmd/rollout/rollout.go:47 +msgid "Manage a deployment rollout" +msgstr "Manage a deployment rollout" + +#: pkg/kubectl/cmd/drain.go:128 +msgid "Mark node as schedulable" +msgstr "Mark node as schedulable" + +#: pkg/kubectl/cmd/drain.go:103 +msgid "Mark node as unschedulable" +msgstr "Mark node as unschedulable" + +#: pkg/kubectl/cmd/rollout/rollout_pause.go:74 +msgid "Mark the provided resource as paused" +msgstr "Mark the provided resource as paused" + +#: pkg/kubectl/cmd/certificates.go:36 +msgid "Modify certificate resources." +msgstr "Modify certificate resources." + # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: pkg/kubectl/cmd/config/config.go:40 msgid "Modify kubeconfig files" msgstr "kubeconfigファイルを変更する" +#: pkg/kubectl/cmd/expose.go:108 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." + +#: pkg/kubectl/cmd/logs.go:113 +msgid "" +"Only return logs after a specific date (RFC3339). 
+"one of since-time / since may be used."
+msgstr ""
+"Only return logs after a specific date (RFC3339). Defaults to all logs. Only "
+"one of since-time / since may be used."
+
+#: pkg/kubectl/cmd/completion.go:104
+msgid "Output shell completion code for the specified shell (bash or zsh)"
+msgstr "Output shell completion code for the specified shell (bash or zsh)"
+
+#: pkg/kubectl/cmd/convert.go:85
+msgid ""
+"Output the formatted object with the given group version (for ex: "
+"'extensions/v1beta1').)"
+msgstr ""
+"Output the formatted object with the given group version (for ex: "
+"'extensions/v1beta1').)"
+
+#: pkg/kubectl/cmd/create_secret.go:158
+msgid "Password for Docker registry authentication"
+msgstr "Password for Docker registry authentication"
+
+#: pkg/kubectl/cmd/create_secret.go:226
+msgid "Path to PEM encoded public key certificate."
+msgstr "Path to PEM encoded public key certificate."
+
+#: pkg/kubectl/cmd/create_secret.go:227
+msgid "Path to private key associated with given certificate."
+msgstr "Path to private key associated with given certificate."
+
+#: pkg/kubectl/cmd/rollingupdate.go:85
+msgid "Perform a rolling update of the given ReplicationController"
+msgstr "Perform a rolling update of the given ReplicationController"
+
+#: pkg/kubectl/cmd/scale.go:83
+msgid ""
+"Precondition for resource version. Requires that the current resource "
+"version match this value in order to scale."
+msgstr ""
+"Precondition for resource version. Requires that the current resource "
+"version match this value in order to scale."
+
+#: pkg/kubectl/cmd/version.go:40
+msgid "Print the client and server version information"
+msgstr "Print the client and server version information"
+
+#: pkg/kubectl/cmd/options.go:38
+msgid "Print the list of flags inherited by all commands"
+msgstr "Print the list of flags inherited by all commands"
+
+#: pkg/kubectl/cmd/logs.go:93
+msgid "Print the logs for a container in a pod"
+msgstr "Print the logs for a container in a pod"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go#L98
+#: pkg/kubectl/cmd/replace.go:71
+msgid "Replace a resource by filename or stdin"
+msgstr "Replace a resource by filename or stdin"
+
+#: pkg/kubectl/cmd/rollout/rollout_resume.go:72
+msgid "Resume a paused resource"
+msgstr "Resume a paused resource"
+
+#: pkg/kubectl/cmd/create_rolebinding.go:57
+msgid "Role this RoleBinding should reference"
+msgstr "Role this RoleBinding should reference"
+
+#: pkg/kubectl/cmd/run.go:97
+msgid "Run a particular image on the cluster"
+msgstr "Run a particular image on the cluster"
+
+#: pkg/kubectl/cmd/proxy.go:69
+msgid "Run a proxy to the Kubernetes API server"
+msgstr "Run a proxy to the Kubernetes API server"
+
+#: pkg/kubectl/cmd/create_secret.go:161
+msgid "Server location for Docker registry"
+msgstr "Server location for Docker registry"
+
+#: pkg/kubectl/cmd/scale.go:71
+msgid ""
+"Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job"
+msgstr ""
+"Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job"
+
+#: pkg/kubectl/cmd/set/set.go:38
+msgid "Set specific features on objects"
+msgstr "Set specific features on objects"
+
+#: pkg/kubectl/cmd/apply_set_last_applied.go:83
+msgid ""
+"Set the last-applied-configuration annotation on a live object to match the "
+"contents of a file."
+msgstr ""
+"Set the last-applied-configuration annotation on a live object to match the "
+"contents of a file."
+
+#: pkg/kubectl/cmd/set/set_selector.go:82
+msgid "Set the selector on a resource"
+msgstr "リソースのセレクターを設定する"
+
 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67
+#: pkg/kubectl/cmd/config/create_cluster.go:68
 msgid "Sets a cluster entry in kubeconfig"
 msgstr "kubeconfigにクラスターエントリを設定する"

 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57
+#: pkg/kubectl/cmd/config/create_context.go:58
 msgid "Sets a context entry in kubeconfig"
 msgstr "kubeconfigにコンテキストエントリを設定する"

 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103
+#: pkg/kubectl/cmd/config/create_authinfo.go:104
 msgid "Sets a user entry in kubeconfig"
 msgstr "kubeconfigにユーザーエントリを設定する"

 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59
+#: pkg/kubectl/cmd/config/set.go:60
 msgid "Sets an individual value in a kubeconfig file"
 msgstr "kubeconfigファイル内の変数を個別に設定する"

 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48
+#: pkg/kubectl/cmd/config/use_context.go:49
 msgid "Sets the current-context in a kubeconfig file"
 msgstr "kubeconfigにカレントコンテキストを設定する"

+#: pkg/kubectl/cmd/describe.go:86
+msgid "Show details of a specific resource or group of resources"
+msgstr "Show details of a specific resource or group of resources"
+
+#: pkg/kubectl/cmd/rollout/rollout_status.go:58
+msgid "Show the status of the rollout"
+msgstr "Show the status of the rollout"
+
+#: pkg/kubectl/cmd/expose.go:106
+msgid "Synonym for --target-port"
+msgstr "Synonym for --target-port"
+
+#: pkg/kubectl/cmd/expose.go:88
+msgid ""
+"Take a replication controller, service, deployment or pod and expose it as a "
+"new Kubernetes Service"
+msgstr ""
+"Take a replication controller, service, deployment or pod and expose it as a "
+"new Kubernetes Service"
+
+#: pkg/kubectl/cmd/run.go:117
+msgid "The image for the container to run."
+msgstr "The image for the container to run."
+
+#: pkg/kubectl/cmd/run.go:119
+msgid ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server"
+msgstr ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server"
+
+#: pkg/kubectl/cmd/rollingupdate.go:101
+msgid ""
+"The key to use to differentiate between two different controllers, default "
+"'deployment'. Only relevant when --image is specified, ignored otherwise"
+msgstr ""
+"The key to use to differentiate between two different controllers, default "
+"'deployment'. Only relevant when --image is specified, ignored otherwise"
+
+#: pkg/kubectl/cmd/create_pdb.go:63
+msgid ""
+"The minimum number or percentage of available pods this budget requires."
+msgstr ""
+"The minimum number or percentage of available pods this budget requires."
+
+#: pkg/kubectl/cmd/expose.go:111
+msgid "The name for the newly created object."
+msgstr "The name for the newly created object."
+
+#: pkg/kubectl/cmd/autoscale.go:72
+msgid ""
+"The name for the newly created object. If not specified, the name of the "
+"input resource will be used."
+msgstr ""
+"The name for the newly created object. If not specified, the name of the "
+"input resource will be used."
+
+#: pkg/kubectl/cmd/run.go:116
+msgid ""
+"The name of the API generator to use, see http://kubernetes.io/docs/user-"
+"guide/kubectl-conventions/#generators for a list."
+msgstr "" +"The name of the API generator to use, see http://kubernetes.io/docs/user-" +"guide/kubectl-conventions/#generators for a list." + +#: pkg/kubectl/cmd/autoscale.go:67 +msgid "" +"The name of the API generator to use. Currently there is only 1 generator." +msgstr "" +"The name of the API generator to use. Currently there is only 1 generator." + +#: pkg/kubectl/cmd/expose.go:99 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"The name of the API generator to use. There are 2 generators: ‘service/v1’ " +"and ‘service/v2’. The only difference between them is that service port in " +"v1 is named ‘default’, while it is left unnamed in v2. Default is ‘service/" +"v2’." + +#: pkg/kubectl/cmd/run.go:136 +msgid "" +"The name of the generator to use for creating a service. Only used if --" +"expose is true" +msgstr "" +"The name of the generator to use for creating a service. Only used if —" +"expose is true" + +#: pkg/kubectl/cmd/expose.go:100 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "The network protocol for the service to be created. Default is ‘TCP’." + +#: pkg/kubectl/cmd/expose.go:101 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" + +#: pkg/kubectl/cmd/run.go:124 +msgid "" +"The port that this container exposes. If --expose is true, this is also the " +"port used by the service that is created." +msgstr "" +"The port that this container exposes. If —expose is true, this is also the " +"port used by the service that is created." + +#: pkg/kubectl/cmd/run.go:134 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement limits for this container. For example, ‘cpu=200m," +"memory=512Mi’. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." + +#: pkg/kubectl/cmd/run.go:133 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement requests for this container. For example, " +"‘cpu=100m,memory=256Mi’. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." + +#: pkg/kubectl/cmd/run.go:131 +msgid "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]. " +"If set to 'Always' a deployment is created, if set to 'OnFailure' a job is " +"created, if set to 'Never', a regular pod is created. For the latter two --" +"replicas must be 1. Default 'Always', for CronJobs `Never`." +msgstr "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]. " +"If set to ‘Always’ a deployment is created, if set to ‘OnFailure’ a job is " +"created, if set to ‘Never’, a regular pod is created. For the latter two —" +"replicas must be 1. Default ‘Always’, for CronJobs `Never`." 
+
+#: pkg/kubectl/cmd/create_secret.go:88
+msgid "The type of secret to create"
+msgstr "The type of secret to create"
+
+#: pkg/kubectl/cmd/expose.go:102
+msgid ""
+"Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is "
+"'ClusterIP'."
+msgstr ""
+"Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is "
+"'ClusterIP'."
+
+#: pkg/kubectl/cmd/rollout/rollout_undo.go:72
+msgid "Undo a previous rollout"
+msgstr "現在のロールアウトを取り消す"
+
 # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47
+#: pkg/kubectl/cmd/config/unset.go:48
 msgid "Unsets an individual value in a kubeconfig file"
 msgstr "kubeconfigファイルから変数を個別に削除する"

+#: pkg/kubectl/cmd/patch.go:96
+msgid "Update field(s) of a resource using strategic merge patch"
+msgstr "Update field(s) of a resource using strategic merge patch"
+
+#: pkg/kubectl/cmd/set/set_image.go:95
+msgid "Update image of a pod template"
+msgstr "Update image of a pod template"
+
+#: pkg/kubectl/cmd/set/set_resources.go:102
+msgid "Update resource requests/limits on objects with pod templates"
+msgstr "Update resource requests/limits on objects with pod templates"
+
+#: pkg/kubectl/cmd/annotate.go:116
 msgid "Update the annotations on a resource"
 msgstr "リソースのアノテーションを更新する"

+#: pkg/kubectl/cmd/label.go:114
+msgid "Update the labels on a resource"
+msgstr "リソースのラベルを更新する"
+
+#: pkg/kubectl/cmd/taint.go:87
+msgid "Update the taints on one or more nodes"
+msgstr "Update the taints on one or more nodes"
+
+#: pkg/kubectl/cmd/create_secret.go:156
+msgid "Username for Docker registry authentication"
+msgstr "Username for Docker registry authentication"
+
+#: pkg/kubectl/cmd/apply_view_last_applied.go:64
+msgid "View latest last-applied-configuration annotations of a resource/object"
+msgstr ""
+"View latest last-applied-configuration annotations of a resource/object"
+
+#: pkg/kubectl/cmd/rollout/rollout_history.go:52
+msgid "View rollout history"
+msgstr "ロールアウトの履歴を表示する"
+
+#: pkg/kubectl/cmd/clusterinfo_dump.go:46
 msgid ""
-"watch is only supported on individual resources and resource collections - "
-"%d resources were found"
-msgid_plural ""
-"watch is only supported on individual resources and resource collections - "
-"%d resources were found"
-msgstr[0] ""
-"watchは単一リソース及びリソースコレクションのみサポートしています - "
-"%d個のリソースが見つかりました"
-msgstr[1] ""
-"watchは単一リソース及びリソースコレクションのみサポートしています - "
-"%d個のリソースが見つかりました"
+"Where to output the files. If empty or '-' uses stdout, otherwise creates a "
+"directory hierarchy in that directory"
+msgstr ""
+"Where to output the files. If empty or '-' uses stdout, otherwise creates a "
+"directory hierarchy in that directory"
+
+#: pkg/kubectl/cmd/run_test.go:85
+msgid "dummy restart flag)"
+msgstr "dummy restart flag)"
+
+#: pkg/kubectl/cmd/create_service.go:254
+msgid "external name of service"
+msgstr "external name of service"
+
+#: pkg/kubectl/cmd/cmd.go:227
+msgid "kubectl controls the Kubernetes cluster manager"
+msgstr "kubectl controls the Kubernetes cluster manager"
+
+#~ msgid ""
+#~ "watch is only supported on individual resources and resource collections "
+#~ "- %d resources were found"
+#~ msgid_plural ""
+#~ "watch is only supported on individual resources and resource collections "
+#~ "- %d resources were found"
+#~ msgstr[0] ""
+#~ "watchは単一リソース及びリソースコレクションのみサポートしています - %d個の"
+#~ "リソースが見つかりました"
+#~ msgstr[1] ""
+#~ "watchは単一リソース及びリソースコレクションのみサポートしています - %d個の"
+#~ "リソースが見つかりました"