Merge pull request #42147 from bowei/ip-alias-2

Automatic merge from submit-queue

Add support for IP aliases for pod IPs (GCP alpha feature)

```release-note
Adds support for allocation of pod IPs via IP aliases.

# Adds a KUBE_GCE_ENABLE_IP_ALIASES flag to the cluster scripts (`kube-{up,down}.sh`).

KUBE_GCE_ENABLE_IP_ALIASES=true enables allocation of PodCIDR IPs
via the IP alias mechanism rather than routes. This feature is currently
available only on GCE.

## Usage
$ CLUSTER_IP_RANGE=10.100.0.0/16 KUBE_GCE_ENABLE_IP_ALIASES=true bash -x cluster/kube-up.sh

# Adds a CloudAllocator to the node CIDR allocator (kube-controller-manager).

If CIDRAllocatorType is set to `CloudAllocator`, CIDR allocation is
instead done by the external cloud provider, and the node controller is
only responsible for reflecting the allocation into the node spec.

- Splits off the rangeAllocator from the cidr_allocator.go file.
- Adds cloudCIDRAllocator, which is used when the cloud provider allocates
  the CIDR ranges externally. (GCE support only)
- Updates RBAC permission for node controller to include PATCH
```
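
For context, when IP aliases are enabled the startup scripts wire the new allocator into the controller manager (see the configure-helper.sh changes below). A hypothetical invocation showing just the relevant flags, everything else elided; the CloudAllocator path assumes the GCE cloud provider, per `NewCloudCIDRAllocator`:

```
kube-controller-manager \
  --cloud-provider=gce \
  --allocate-node-cidrs=true \
  --cidr-allocator-type=CloudAllocator \
  --configure-cloud-routes=false
```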
Kubernetes Submit Queue 2017-04-11 22:09:24 -07:00 committed by GitHub
commit ceccd305ce
42 changed files with 113612 additions and 2573 deletions

Godeps/Godeps.json generated

@ -2673,35 +2673,39 @@
},
{
"ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/compute/v0.alpha",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/container/v1",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/dns/v1",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/gensupport",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/api/logging/v2beta1",
"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
},
{
"ImportPath": "google.golang.org/appengine",

Godeps/LICENSES generated

@ -81511,6 +81511,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/google.golang.org/api/compute/v0.alpha licensed under: =
Copyright (c) 2011 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 -
================================================================================
================================================================================
= vendor/google.golang.org/api/compute/v1 licensed under: =


@ -723,6 +723,17 @@ EOF
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
EOF
fi
if [ -n "${PROVIDER_VARS:-}" ]; then
local var_name
local var_value
for var_name in ${PROVIDER_VARS}; do
eval "local var_value=\$(yaml-quote \${${var_name}})"
echo "${var_name}: ${var_value}" >>$file
done
fi
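
The loop above uses indirect expansion to look up each variable named in PROVIDER_VARS and append it to the generated kube-env. A minimal standalone sketch of that expansion; the yaml-quote stub is an assumption standing in for the real helper:

```
#!/usr/bin/env bash
# Simplified stand-in for the real yaml-quote helper (assumed here to
# single-quote its argument).
function yaml-quote {
  echo "'$*'"
}

file="kube-env.yaml"
ENABLE_IP_ALIASES=true
PROVIDER_VARS="ENABLE_IP_ALIASES"

for var_name in ${PROVIDER_VARS}; do
  # Indirect lookup: resolve the value of the variable named by var_name.
  eval "var_value=\$(yaml-quote \${${var_name}})"
  echo "${var_name}: ${var_value}" >>"${file}"
done
# kube-env.yaml now contains the line: ENABLE_IP_ALIASES: 'true'
```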
if [[ "${master}" == "true" ]]; then
# Master-only env vars.
cat >>$file <<EOF


@ -77,15 +77,16 @@ INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/14}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
if [[ "${FEDERATION:-}" == true ]]; then
NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro,https://www.googleapis.com/auth/ndev.clouddns.readwrite}"
else
NODE_SCOPES="${NODE_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
fi
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
@ -173,6 +174,25 @@ fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# ALPHA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. gcloud alpha supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true. It is the primary range in
# the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="${NODE_IP_RANGE:-10.40.0.0/22}"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS} ENABLE_IP_ALIASES"
fi
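
Putting the knobs above together, a hypothetical turn-up overriding the defaults (the subnetwork is auto-created when KUBE_GCE_IP_ALIAS_SUBNETWORK is left at its default):

```
CLUSTER_IP_RANGE=10.100.0.0/16 \
KUBE_GCE_ENABLE_IP_ALIASES=true \
KUBE_GCE_IP_ALIAS_SIZE=/24 \
NODE_IP_RANGE=10.40.0.0/22 \
  bash cluster/kube-up.sh
```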
# Admission Controllers to invoke prior to persisting objects in cluster
# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota


@ -79,8 +79,13 @@ INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.180.0.0/14}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true. It is the primary range in
# the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="${NODE_IP_RANGE:-10.40.0.0/22}"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
# Optional: set feature gates
@ -198,6 +203,25 @@ fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# ALPHA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. gcloud alpha supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true. It is the primary range in
# the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="${NODE_IP_RANGE:-10.40.0.0/22}"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS} ENABLE_IP_ALIASES"
fi
# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
ADMISSION_CONTROL="${KUBE_ADMISSION_CONTROL:-NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota}"


@ -585,6 +585,11 @@ EOF
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ENABLE_IP_ALIASES:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_ip_aliases: '$(echo "$ENABLE_IP_ALIASES" | sed -e "s/'/''/g")'
EOF
fi
}


@ -997,6 +997,10 @@ function start-kube-controller-manager {
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=" --cidr-allocator-type=CloudAllocator"
params+=" --configure-cloud-routes=false"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi


@ -31,11 +31,11 @@ source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
# detect-project
# get-bearer-token
function create-master-instance {
local address_opt=""
[[ -n ${1:-} ]] && address_opt="--address ${1}"
local address=""
[[ -n ${1:-} ]] && address="${1}"
write-master-env
create-master-instance-internal "${MASTER_NAME}" "${address_opt}"
create-master-instance-internal "${MASTER_NAME}" "${address}"
}
function replicate-master-instance() {
@ -65,38 +65,58 @@ function replicate-master-instance() {
function create-master-instance-internal() {
local gcloud="gcloud"
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud alpha"
fi
local -r master_name="${1}"
local -r address_option="${2:-}"
local -r address="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
gcloud compute instances create "${master_name}" \
${address_option} \
local network=$(make-gcloud-network-argument \
"${NETWORK}" "${address:-}" \
"${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SUBNETWORK:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
local disk="name=${master_name}-pd"
disk="${disk},device-name=master-pd"
disk="${disk},mode=rw"
disk="${disk},boot=no"
disk="${disk},auto-delete=no"
${gcloud} compute instances create "${master_name}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--network "${NETWORK}" \
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--can-ip-forward \
--metadata-from-file \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml,configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt" \
--disk "name=${master_name}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" \
--metadata-from-file "${metadata}" \
--disk "${disk}" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE:-30}" \
${preemptible_master}
${preemptible_master} \
${network}
}
function get-metadata() {
local zone="${1}"
local name="${2}"
local key="${3}"
local metadata_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}"
gcloud compute ssh "${name}" \
--project "${PROJECT}" \
--zone "${zone}" \
--command "curl \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}\" -H \"Metadata-Flavor: Google\"" 2>/dev/null
--command "curl '${metadata_url}' -H 'Metadata-Flavor: Google'" 2>/dev/null
}


@ -1205,6 +1205,10 @@ function start-kube-controller-manager {
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=" --cidr-allocator-type=CloudAllocator"
params+=" --configure-cloud-routes=false"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi


@ -31,12 +31,12 @@ source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
# detect-project
# get-bearer-token
function create-master-instance {
local address_opt=""
[[ -n ${1:-} ]] && address_opt="--address ${1}"
local address=""
[[ -n ${1:-} ]] && address="${1}"
write-master-env
ensure-gci-metadata-files
create-master-instance-internal "${MASTER_NAME}" "${address_opt}"
create-master-instance-internal "${MASTER_NAME}" "${address}"
}
function replicate-master-instance() {
@ -74,30 +74,51 @@ function replicate-master-instance() {
function create-master-instance-internal() {
local gcloud="gcloud"
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud alpha"
fi
local -r master_name="${1}"
local -r address_option="${2:-}"
local -r address="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
gcloud compute instances create "${master_name}" \
${address_option} \
local network=$(make-gcloud-network-argument \
"${NETWORK}" "${address:-}" \
"${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SUBNETWORK:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
metadata="${metadata},gci-update-strategy=${KUBE_TEMP}/gci-update.txt"
metadata="${metadata},gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt"
metadata="${metadata},gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt"
metadata="${metadata},kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml"
local disk="name=${master_name}-pd"
disk="${disk},device-name=master-pd"
disk="${disk},mode=rw"
disk="${disk},boot=no"
disk="${disk},auto-delete=no"
${gcloud} compute instances create "${master_name}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--network "${NETWORK}" \
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--can-ip-forward \
--metadata-from-file \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml,configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt,gci-update-strategy=${KUBE_TEMP}/gci-update.txt,gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt,gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt,kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml" \
--disk "name=${master_name}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" \
--metadata-from-file "${metadata}" \
--disk "${disk}" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE:-10}" \
${preemptible_master}
${preemptible_master} \
${network}
}
function get-metadata() {


@ -449,6 +449,35 @@ function create-firewall-rule() {
done
}
# Format the string argument for gcloud network.
function make-gcloud-network-argument() {
local network="$1"
local address="$2" # optional
local enable_ip_alias="$3" # optional
local alias_subnetwork="$4" # optional
local alias_size="$5" # optional
local ret=""
if [[ "${enable_ip_alias}" == 'true' ]]; then
ret="--network-interface"
ret="${ret} network=${network}"
# If address is omitted, instance will not receive an external IP.
ret="${ret},address=${address:-}"
ret="${ret},subnet=${alias_subnetwork}"
ret="${ret},aliases=pods-default:${alias_size}"
ret="${ret} --no-can-ip-forward"
else
ret="--network ${network}"
ret="${ret} --can-ip-forward"
if [[ -n ${address:-} ]]; then
ret="${ret} --address ${address}"
fi
fi
echo "${ret}"
}
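
For reference, the two output shapes this helper produces, with illustrative values:

```
# IP aliases enabled (no external address requested):
#   make-gcloud-network-argument my-net "" true my-subnet /24
#   => --network-interface network=my-net,address=,subnet=my-subnet,aliases=pods-default:/24 --no-can-ip-forward
#
# IP aliases disabled, static external address:
#   make-gcloud-network-argument my-net 35.190.0.10 false "" ""
#   => --network my-net --can-ip-forward --address 35.190.0.10
```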
# $1: version (required)
function get-template-name-from-version() {
# trim template name to pass gce name validation
@ -475,20 +504,34 @@ function create-node-template() {
fi
fi
local attempt=1
local gcloud="gcloud"
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud alpha"
fi
local preemptible_minions=""
if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
preemptible_minions="--preemptible --maintenance-policy TERMINATE"
fi
local local_ssds=""
if [ ! -z ${NODE_LOCAL_SSDS+x} ]; then
for i in $(seq ${NODE_LOCAL_SSDS}); do
local_ssds="$local_ssds--local-ssd=interface=SCSI "
done
fi
local network=$(make-gcloud-network-argument \
"${NETWORK}" "" \
"${ENABLE_IP_ALIASES:-}" \
"${IP_ALIAS_SUBNETWORK:-}" \
"${IP_ALIAS_SIZE:-}")
local attempt=1
while true; do
echo "Attempt ${attempt} to create ${1}" >&2
if ! gcloud compute instance-templates create "$template_name" \
if ! ${gcloud} compute instance-templates create \
"$template_name" \
--project "${PROJECT}" \
--machine-type "${NODE_SIZE}" \
--boot-disk-type "${NODE_DISK_TYPE}" \
@ -496,11 +539,11 @@ function create-node-template() {
--image-project="${NODE_IMAGE_PROJECT}" \
--image "${NODE_IMAGE}" \
--tags "${NODE_TAG}" \
--network "${NETWORK}" \
${local_ssds} \
--region "${REGION}" \
${network} \
${preemptible_minions} \
$2 \
--can-ip-forward \
--metadata-from-file $(echo ${@:3} | tr ' ' ',') >&2; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to create instance template $template_name ${color_norm}" >&2
@ -597,6 +640,7 @@ function kube-up() {
if [[ ${KUBE_USE_EXISTING_MASTER:-} == "true" ]]; then
detect-master
parse-master-env
create-subnetwork
create-nodes
elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then
if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" ]]; then
@ -612,6 +656,7 @@ function kube-up() {
else
check-existing
create-network
create-subnetwork
write-cluster-name
create-autoscaler-config
create-master
@ -680,6 +725,48 @@ function create-network() {
fi
}
function create-subnetwork() {
case ${ENABLE_IP_ALIASES} in
true) ;;
false) return;;
*) echo "${color_red}Invalid argument to ENABLE_IP_ALIASES${color_norm}"
exit 1;;
esac
# Look for the subnet, it must exist and have a secondary range
# configured.
local subnet=$(gcloud alpha compute networks subnets describe \
--region ${REGION} ${IP_ALIAS_SUBNETWORK} 2>/dev/null)
if [[ -z ${subnet} ]]; then
# Only allow auto-creation for default subnets
if [[ ${IP_ALIAS_SUBNETWORK} != ${INSTANCE_PREFIX}-subnet-default ]]; then
echo "${color_red}Subnetwork ${NETWORK}:${IP_ALIAS_SUBNETWORK} does not exist${color_norm}"
exit 1
fi
if [ -z ${NODE_IP_RANGE:-} ]; then
echo "${color_red}NODE_IP_RANGE must be specified{color_norm}"
exit 1
fi
echo "Creating subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
gcloud alpha compute networks subnets create \
${IP_ALIAS_SUBNETWORK} \
--description "Automatically generated subnet for ${INSTANCE_PREFIX} cluster. This will be removed on cluster teardown." \
--network ${NETWORK} \
--region ${REGION} \
--range ${NODE_IP_RANGE} \
--secondary-range "name=pods-default,range=${CLUSTER_IP_RANGE}"
echo "Created subnetwork ${IP_ALIAS_SUBNETWORK}"
else
if ! echo ${subnet} | grep --quiet secondaryIpRanges; then
echo "${color_red}Subnet ${IP_ALIAS_SUBNETWORK} does not have a secondary range${color_norm}"
exit 1
fi
fi
}
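
A non-default subnetwork must exist ahead of time with a secondary range named pods-default (matching the alias name used in make-gcloud-network-argument). A hypothetical sketch of pre-creating one and pointing the scripts at it:

```
gcloud alpha compute networks subnets create my-pods-subnet \
  --network default \
  --region us-central1 \
  --range 10.40.0.0/22 \
  --secondary-range "name=pods-default,range=10.100.0.0/16"

KUBE_GCE_ENABLE_IP_ALIASES=true \
KUBE_GCE_IP_ALIAS_SUBNETWORK=my-pods-subnet \
CLUSTER_IP_RANGE=10.100.0.0/16 \
  bash cluster/kube-up.sh
```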
function delete-firewall-rules() {
for fw in $@; do
if [[ -n $(gcloud compute firewall-rules --project "${PROJECT}" describe "${fw}" --format='value(name)' 2>/dev/null || true) ]]; then
@ -701,6 +788,24 @@ function delete-network() {
fi
}
function delete-subnetwork() {
if [[ ${ENABLE_IP_ALIASES:-} != "true" ]]; then
return
fi
# Only delete automatically created subnets.
if [[ ${IP_ALIAS_SUBNETWORK} != ${INSTANCE_PREFIX}-subnet-default ]]; then
return
fi
echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
if [[ -n $(gcloud alpha compute networks subnets describe \
--region ${REGION} ${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then
gcloud alpha --quiet compute networks subnets delete \
--region ${REGION} ${IP_ALIAS_SUBNETWORK}
fi
}
# Assumes:
# NUM_NODES
# Sets:
@ -1414,6 +1519,9 @@ function kube-down() {
"${CLUSTER_NAME}-default-internal-node" \
"${NETWORK}-default-ssh" \
"${NETWORK}-default-internal" # Pre-1.5 clusters
delete-subnetwork
if [[ "${KUBE_DELETE_NETWORK}" == "true" ]]; then
delete-network || true # might fail if there are leaked firewall rules
fi


@ -32,7 +32,6 @@ fi
source "${KUBE_ROOT}/cluster/kube-util.sh"
if [ -z "${ZONE-}" ]; then
echo "... Starting cluster using provider: ${KUBERNETES_PROVIDER}" >&2
else


@ -28,6 +28,12 @@ else
KUBERNETES_PROVIDER="${KUBERNETES_PROVIDER:-gce}"
fi
# PROVIDER_VARS is a list of cloud provider specific variables. Note:
# this is a list of the _names_ of the variables, not the value of the
# variables. Providers can add variables to be appended to kube-env.
# (see `build-kube-env`).
PROVIDER_VARS=""
PROVIDER_UTILS="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
if [ -f ${PROVIDER_UTILS} ]; then
source "${PROVIDER_UTILS}"


@ -477,6 +477,7 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root
serviceCIDR,
int(s.NodeCIDRMaskSize),
s.AllocateNodeCIDRs,
nodecontroller.CIDRAllocatorType(s.CIDRAllocatorType),
s.EnableTaintManager,
utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions),
)


@ -188,7 +188,10 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled
fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster.")
fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.")
fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false,
"Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.StringVar(&s.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator",
"Type of CIDR allocator to use")
fs.BoolVar(&s.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")


@ -29,6 +29,10 @@ api-server-port
api-server-port
api-servers
api-servers
apiserver-count
apiserver-count
api-server-port
api-servers
api-server-service-type
api-token
api-version
@ -80,6 +84,7 @@ cgroup-driver
cgroup-root
cgroups-per-qos
chaos-chance
cidr-allocator-type
clean-start
cleanup
cleanup-iptables
@ -401,6 +406,8 @@ kube-master-url
kube-reserved
kube-reserved
kube-reserved-cgroup
kube-master-url
kube-reserved
kubernetes-anywhere-cluster
kubernetes-anywhere-path
kubernetes-anywhere-phase2-provider
@ -691,6 +698,9 @@ use-service-account-credentials
user-whitelist
use-service-account-credentials
use-service-account-credentials
user-whitelist
use-service-account-credentials
use-taint-based-evictions
verb
verify-only
versioned-clientset-package


@ -794,9 +794,11 @@ type KubeControllerManagerConfiguration struct {
ServiceCIDR string
// NodeCIDRMaskSize is the mask size for node cidr in cluster.
NodeCIDRMaskSize int32
// allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
// AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
// ConfigureCloudRoutes is true, to be set on the cloud provider.
AllocateNodeCIDRs bool
// CIDRAllocatorType determines what kind of pod CIDR allocator will be used.
CIDRAllocatorType string
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
// to be configured on the cloud provider.
ConfigureCloudRoutes bool


@ -46,6 +46,7 @@ go_library(
"//vendor:golang.org/x/net/context",
"//vendor:golang.org/x/oauth2",
"//vendor:golang.org/x/oauth2/google",
"//vendor:google.golang.org/api/compute/v0.alpha",
"//vendor:google.golang.org/api/compute/v1",
"//vendor:google.golang.org/api/container/v1",
"//vendor:google.golang.org/api/gensupport",


@ -36,6 +36,7 @@ import (
"github.com/golang/glog"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
computealpha "google.golang.org/api/compute/v0.alpha"
compute "google.golang.org/api/compute/v1"
container "google.golang.org/api/container/v1"
"google.golang.org/api/gensupport"
@ -77,6 +78,7 @@ const (
// GCECloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine.
type GCECloud struct {
service *compute.Service
serviceAlpha *computealpha.Service
containerService *container.Service
projectID string
region string
@ -211,43 +213,29 @@ func newGCECloud(config io.Reader) (*GCECloud, error) {
func CreateGCECloud(projectID, region, zone string, managedZones []string, networkURL string, nodeTags []string,
nodeInstancePrefix string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*GCECloud, error) {
if tokenSource == nil {
var err error
tokenSource, err = google.DefaultTokenSource(
oauth2.NoContext,
compute.CloudPlatformScope,
compute.ComputeScope)
glog.Infof("Using DefaultTokenSource %#v", tokenSource)
if err != nil {
return nil, err
}
} else {
glog.Infof("Using existing Token Source %#v", tokenSource)
}
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
if _, err := tokenSource.Token(); err != nil {
glog.Errorf("error fetching initial token: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
client := oauth2.NewClient(oauth2.NoContext, tokenSource)
svc, err := compute.New(client)
client, err := newOauthClient(tokenSource)
if err != nil {
return nil, err
}
containerSvc, err := container.New(client)
service, err := compute.New(client)
if err != nil {
return nil, err
}
client, err = newOauthClient(tokenSource)
serviceAlpha, err := computealpha.New(client)
if err != nil {
return nil, err
}
containerService, err := container.New(client)
if err != nil {
return nil, err
}
if networkURL == "" {
networkName, err := getNetworkNameViaAPICall(svc, projectID)
networkName, err := getNetworkNameViaAPICall(service, projectID)
if err != nil {
return nil, err
}
@ -255,7 +243,7 @@ func CreateGCECloud(projectID, region, zone string, managedZones []string, netwo
}
if len(managedZones) == 0 {
managedZones, err = getZonesForRegion(svc, projectID, region)
managedZones, err = getZonesForRegion(service, projectID, region)
if err != nil {
return nil, err
}
@ -267,8 +255,9 @@ func CreateGCECloud(projectID, region, zone string, managedZones []string, netwo
operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(10, 100) // 10 qps, 100 bucket size.
return &GCECloud{
service: svc,
containerService: containerSvc,
service: service,
serviceAlpha: serviceAlpha,
containerService: containerService,
projectID: projectID,
region: region,
localZone: zone,
@ -378,3 +367,31 @@ func getZonesForRegion(svc *compute.Service, projectID, region string) ([]string
}
return zones, nil
}
func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
if tokenSource == nil {
var err error
tokenSource, err = google.DefaultTokenSource(
oauth2.NoContext,
compute.CloudPlatformScope,
compute.ComputeScope)
glog.Infof("Using DefaultTokenSource %#v", tokenSource)
if err != nil {
return nil, err
}
} else {
glog.Infof("Using existing Token Source %#v", tokenSource)
}
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
if _, err := tokenSource.Token(); err != nil {
glog.Errorf("error fetching initial token: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
return oauth2.NewClient(oauth2.NoContext, tokenSource), nil
}


@ -26,6 +26,7 @@ import (
"cloud.google.com/go/compute/metadata"
"github.com/golang/glog"
computealpha "google.golang.org/api/compute/v0.alpha"
compute "google.golang.org/api/compute/v1"
"k8s.io/apimachinery/pkg/types"
@ -51,6 +52,20 @@ func (gce *GCECloud) NodeAddresses(_ types.NodeName) ([]v1.NodeAddress, error) {
}, nil
}
// This method will not be called from the node that is requesting this ID.
// i.e. metadata service and other local methods cannot be used here
func (gce *GCECloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
return []v1.NodeAddress{}, errors.New("unimplemented")
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node
// with the specified unique providerID This method will not be called from the
// node that is requesting this ID. i.e. metadata service and other local
// methods cannot be used here
func (gce *GCECloud) InstanceTypeByProviderID(providerID string) (string, error) {
return "", errors.New("unimplemented")
}
// ExternalID returns the cloud provider ID of the node with the specified NodeName (deprecated).
func (gce *GCECloud) ExternalID(nodeName types.NodeName) (string, error) {
instanceName := mapNodeNameToInstanceName(nodeName)
@ -202,6 +217,31 @@ func (gce *GCECloud) CurrentNodeName(hostname string) (types.NodeName, error) {
return types.NodeName(hostname), nil
}
// AliasRanges returns a list of CIDR ranges that are assigned to the
// `node` for allocation to pods. Returns a list of the form
// "<ip>/<netmask>".
func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err error) {
var instance *gceInstance
instance, err = gce.getInstanceByName(mapNodeNameToInstanceName(nodeName))
if err != nil {
return
}
var res *computealpha.Instance
res, err = gce.serviceAlpha.Instances.Get(
gce.projectID, instance.Zone, instance.Name).Do()
if err != nil {
return
}
for _, networkInterface := range res.NetworkInterfaces {
for _, aliasIpRange := range networkInterface.AliasIpRanges {
cidrs = append(cidrs, aliasIpRange.IpCidrRange)
}
}
return
}
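
The same data AliasRanges reads can be inspected from the CLI; a hypothetical query against the alpha API (instance and zone names are placeholders):

```
gcloud alpha compute instances describe my-node-01 \
  --zone us-central1-b \
  --format='value(networkInterfaces[].aliasIpRanges[].ipCidrRange)'
# e.g. 10.100.3.0/24
```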
// Gets the named instances, returning cloudprovider.InstanceNotFound if any instance is not found
func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) {
instances := make(map[string]*gceInstance)
@ -351,17 +391,3 @@ func (gce *GCECloud) isCurrentInstance(instanceID string) bool {
return currentInstanceID == canonicalizeInstanceName(instanceID)
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (gce *GCECloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
return []v1.NodeAddress{}, errors.New("unimplemented")
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (gce *GCECloud) InstanceTypeByProviderID(providerID string) (string, error) {
return "", errors.New("unimplemented")
}


@ -8,56 +8,6 @@ load(
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"cidr_allocator.go",
"cidr_set.go",
"controller_utils.go",
"doc.go",
"metrics.go",
"nodecontroller.go",
"rate_limited_queue.go",
"taint_controller.go",
"timed_workers.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
"//pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library",
"//pkg/client/listers/core/v1:go_default_library",
"//pkg/client/listers/extensions/v1beta1:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/prometheus/client_golang/prometheus",
"//vendor:k8s.io/apimachinery/pkg/api/equality",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/kubernetes/typed/core/v1",
"//vendor:k8s.io/client-go/pkg/api/v1",
"//vendor:k8s.io/client-go/tools/cache",
"//vendor:k8s.io/client-go/tools/record",
"//vendor:k8s.io/client-go/util/flowcontrol",
"//vendor:k8s.io/client-go/util/workqueue",
],
)
go_test(
name = "go_default_test",
srcs = [
@ -96,6 +46,59 @@ go_test(
],
)
go_library(
name = "go_default_library",
srcs = [
"cidr_allocator.go",
"cidr_set.go",
"cloud_cidr_allocator.go",
"controller_utils.go",
"doc.go",
"metrics.go",
"nodecontroller.go",
"range_allocator.go",
"rate_limited_queue.go",
"taint_controller.go",
"timed_workers.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
"//pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library",
"//pkg/client/listers/core/v1:go_default_library",
"//pkg/client/listers/extensions/v1beta1:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/prometheus/client_golang/prometheus",
"//vendor:k8s.io/apimachinery/pkg/api/equality",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/kubernetes/typed/core/v1",
"//vendor:k8s.io/client-go/pkg/api/v1",
"//vendor:k8s.io/client-go/tools/cache",
"//vendor:k8s.io/client-go/tools/record",
"//vendor:k8s.io/client-go/util/flowcontrol",
"//vendor:k8s.io/client-go/util/workqueue",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),


@ -18,259 +18,34 @@ package node
import (
"errors"
"fmt"
"net"
"sync"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"github.com/golang/glog"
v1 "k8s.io/kubernetes/pkg/api/v1"
)
// TODO: figure out the good setting for those constants.
const (
// controls how many NodeSpec updates NC can process concurrently.
cidrUpdateWorkers = 10
cidrUpdateQueueSize = 5000
// podCIDRUpdateRetry controls the number of retries of writing Node.Spec.PodCIDR update.
podCIDRUpdateRetry = 5
)
var errCIDRRangeNoCIDRsRemaining = errors.New("CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range")
var errCIDRRangeNoCIDRsRemaining = errors.New(
"CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range")
type nodeAndCIDR struct {
cidr *net.IPNet
nodeName string
}
// CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes.
// CIDRAllocatorType is the type of the allocator to use.
type CIDRAllocatorType string
const (
RangeAllocatorType CIDRAllocatorType = "RangeAllocator"
CloudAllocatorType CIDRAllocatorType = "CloudAllocator"
)
// CIDRAllocator is an interface implemented by things that know how to
// allocate/occupy/recycle CIDR for nodes.
type CIDRAllocator interface {
// AllocateOrOccupyCIDR looks at the given node, assigns it a valid
// CIDR if it doesn't currently have one or mark the CIDR as used if
// the node already have one.
AllocateOrOccupyCIDR(node *v1.Node) error
// ReleaseCIDR releases the CIDR of the removed node
ReleaseCIDR(node *v1.Node) error
}
type rangeAllocator struct {
client clientset.Interface
cidrs *cidrSet
clusterCIDR *net.IPNet
maxCIDRs int
// Channel that is used to pass updating Nodes with assigned CIDRs to the background
// This increases a throughput of CIDR assignment by not blocking on long operations.
nodeCIDRUpdateChannel chan nodeAndCIDR
recorder record.EventRecorder
// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
sync.Mutex
nodesInProcessing sets.String
}
// NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDR for node
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
if client != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")})
} else {
glog.Fatalf("kubeClient is nil when starting NodeController")
}
ra := &rangeAllocator{
client: client,
cidrs: newCIDRSet(clusterCIDR, subNetMaskSize),
clusterCIDR: clusterCIDR,
nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
}
if serviceCIDR != nil {
ra.filterOutServiceRange(serviceCIDR)
} else {
glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
}
if nodeList != nil {
for _, node := range nodeList.Items {
if node.Spec.PodCIDR == "" {
glog.Infof("Node %v has no CIDR, ignoring", node.Name)
continue
} else {
glog.Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR)
}
if err := ra.occupyCIDR(&node); err != nil {
// This will happen if:
// 1. We find garbage in the podCIDR field. Retrying is useless.
// 2. CIDR out of range: This means a node CIDR has changed.
// This error will keep crashing controller-manager.
return nil, err
}
}
}
for i := 0; i < cidrUpdateWorkers; i++ {
go func(stopChan <-chan struct{}) {
for {
select {
case workItem, ok := <-ra.nodeCIDRUpdateChannel:
if !ok {
glog.Warning("NodeCIDRUpdateChannel read returned false.")
return
}
ra.updateCIDRAllocation(workItem)
case <-stopChan:
return
}
}
}(wait.NeverStop)
}
return ra, nil
}
func (r *rangeAllocator) insertNodeToProcessing(nodeName string) bool {
r.Lock()
defer r.Unlock()
if r.nodesInProcessing.Has(nodeName) {
return false
}
r.nodesInProcessing.Insert(nodeName)
return true
}
func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.Lock()
defer r.Unlock()
r.nodesInProcessing.Delete(nodeName)
}
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
if err := r.cidrs.occupy(podCIDR); err != nil {
return fmt.Errorf("failed to mark cidr as occupied: %v", err)
}
return nil
}
// AllocateOrOccupyCIDR looks at the given node, assigns it a valid CIDR
// if it doesn't currently have one or mark the CIDR as used if the node already have one.
// WARNING: If you're adding any return calls or defer any more work from this function
// you have to handle correctly nodesInProcessing.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
if !r.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
if node.Spec.PodCIDR != "" {
return r.occupyCIDR(node)
}
podCIDR, err := r.cidrs.allocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
recordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}
glog.V(10).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
nodeName: node.Name,
cidr: podCIDR,
}
return nil
}
// ReleaseCIDR releases the CIDR of the removed node
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
}
glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
if err = r.cidrs.release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
}
return err
}
// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used,
// so that they won't be assignable.
func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Checks if service CIDR has a nonempty intersection with cluster CIDR. It is the case if either
// clusterCIDR contains serviceCIDR with clusterCIDR's Mask applied (this means that clusterCIDR contains serviceCIDR)
// or vice versa (which means that serviceCIDR contains clusterCIDR).
if !r.clusterCIDR.Contains(serviceCIDR.IP.Mask(r.clusterCIDR.Mask)) && !serviceCIDR.Contains(r.clusterCIDR.IP.Mask(serviceCIDR.Mask)) {
return
}
if err := r.cidrs.occupy(serviceCIDR); err != nil {
glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
}
}
// Assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)
for rep := 0; rep < podCIDRUpdateRetry; rep++ {
// TODO: change it to using PATCH instead of full Node updates.
node, err = r.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
continue
}
if node.Spec.PodCIDR != "" {
glog.Errorf("Node %v already has allocated CIDR %v. Releasing assigned one if different.", node.Name, node.Spec.PodCIDR)
if node.Spec.PodCIDR != data.cidr.String() {
if err := r.cidrs.release(data.cidr); err != nil {
glog.Errorf("Error when releasing CIDR %v", data.cidr.String())
}
}
return nil
}
node.Spec.PodCIDR = data.cidr.String()
if _, err := r.client.Core().Nodes().Update(node); err != nil {
glog.Errorf("Failed while updating Node.Spec.PodCIDR (%d retries left): %v", podCIDRUpdateRetry-rep-1, err)
} else {
break
}
}
if err != nil {
recordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept the fact that we may leak CIDRs here. This is safer than releasing
// them when we don't know whether the request went through.
// A NodeController restart will return all falsely allocated CIDRs to the pool.
if !apierrors.IsServerTimeout(err) {
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", data.nodeName, err)
if releaseErr := r.cidrs.release(data.cidr); releaseErr != nil {
glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr)
}
}
}
return err
}


@ -0,0 +1,143 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"sync"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
nodeutil "k8s.io/kubernetes/pkg/util/node"
)
// cloudCIDRAllocator allocates node CIDRs according to IP address aliases
// assigned by the cloud provider. In this case, the allocation and
// deallocation is delegated to the external provider, and the controller
// merely takes the assignment and updates the node spec.
type cloudCIDRAllocator struct {
lock sync.Mutex
client clientset.Interface
cloud *gce.GCECloud
recorder record.EventRecorder
}
var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
func NewCloudCIDRAllocator(
client clientset.Interface,
cloud cloudprovider.Interface) (ca CIDRAllocator, err error) {
gceCloud, ok := cloud.(*gce.GCECloud)
if !ok {
err = fmt.Errorf("cloudCIDRAllocator does not support %v provider", cloud.ProviderName())
return
}
ca = &cloudCIDRAllocator{
client: client,
cloud: gceCloud,
recorder: record.NewBroadcaster().NewRecorder(
api.Scheme,
clientv1.EventSource{Component: "cidrAllocator"}),
}
glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
return
}
func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
glog.V(2).Infof("Updating PodCIDR for node %v", node.Name)
cidrs, err := ca.cloud.AliasRanges(types.NodeName(node.Name))
if err != nil {
recordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}
if len(cidrs) == 0 {
recordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
glog.V(2).Infof("Node %v has no CIDRs", node.Name)
return fmt.Errorf("failed to allocate cidr (none exist)")
}
node, err = ca.client.Core().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Could not get Node object from Kubernetes: %v", err)
return err
}
podCIDR := cidrs[0]
if node.Spec.PodCIDR != "" {
if node.Spec.PodCIDR == podCIDR {
glog.V(3).Infof("Node %v has PodCIDR %v", node.Name, podCIDR)
return nil
}
glog.Errorf("PodCIDR cannot be reassigned, node %v spec has %v, but cloud provider has assigned %v",
node.Name, node.Spec.PodCIDR, podCIDR)
// We fall through and set the CIDR despite this error. This
// implements the same logic as implemented in the
// rangeAllocator.
//
// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
}
node.Spec.PodCIDR = cidrs[0]
if _, err := ca.client.Core().Nodes().Update(node); err == nil {
glog.V(2).Infof("Node %v PodCIDR set to %v", node.Name, podCIDR)
} else {
glog.Errorf("Could not update node %v PodCIDR to %v: %v",
node.Name, podCIDR, err)
return err
}
err = nodeutil.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionFalse,
Reason: "RouteCreated",
Message: "NodeController create implicit route",
LastTransitionTime: metav1.Now(),
})
if err != nil {
glog.Errorf("Error setting route status for node %v: %v",
node.Name, err)
}
return err
}
func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
glog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
node.Name, node.Spec.PodCIDR)
return nil
}


@ -109,6 +109,8 @@ type nodeStatusData struct {
type NodeController struct {
allocateNodeCIDRs bool
allocatorType CIDRAllocatorType
cloud cloudprovider.Interface
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
@ -162,9 +164,8 @@ type NodeController struct {
podInformerSynced cache.InformerSynced
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
cidrAllocator CIDRAllocator
// manages taints
taintManager *NoExecuteTaintManager
forcefullyDeletePod func(*v1.Pod) error
@ -210,6 +211,7 @@ func NewNodeController(
serviceCIDR *net.IPNet,
nodeCIDRMaskSize int,
allocateNodeCIDRs bool,
allocatorType CIDRAllocatorType,
runTaintManager bool,
useTaintBasedEvictions bool) (*NodeController, error) {
eventBroadcaster := record.NewBroadcaster()
@ -254,6 +256,7 @@ func NewNodeController(
clusterCIDR: clusterCIDR,
serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs,
allocatorType: allocatorType,
forcefullyDeletePod: func(p *v1.Pod) error { return forcefullyDeletePod(kubeClient, p) },
nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
evictionLimiterQPS: evictionLimiterQPS,
@ -309,7 +312,6 @@ func NewNodeController(
})
nc.podInformerSynced = podInformer.Informer().HasSynced
nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
if nc.allocateNodeCIDRs {
var nodeList *v1.NodeList
var err error
@ -328,147 +330,32 @@ func NewNodeController(
}); pollErr != nil {
return nil, fmt.Errorf("Failed to list all nodes in %v, cannot proceed without updating CIDR map", apiserverStartupGracePeriod)
}
nc.cidrAllocator, err = NewCIDRRangeAllocator(kubeClient, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList)
switch nc.allocatorType {
case RangeAllocatorType:
nc.cidrAllocator, err = NewCIDRRangeAllocator(
kubeClient, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList)
case CloudAllocatorType:
nc.cidrAllocator, err = NewCloudCIDRAllocator(kubeClient, cloud)
default:
return nil, fmt.Errorf("Invalid CIDR allocator type: %v", nc.allocatorType)
}
if err != nil {
return nil, err
}
nodeEventHandlerFuncs = cache.ResourceEventHandlerFuncs{
AddFunc: func(originalObj interface{}) {
obj, err := api.Scheme.DeepCopy(originalObj)
if err != nil {
utilruntime.HandleError(err)
return
}
node := obj.(*v1.Node)
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(nil, node)
}
},
UpdateFunc: func(oldNode, newNode interface{}) {
node := newNode.(*v1.Node)
prevNode := oldNode.(*v1.Node)
// If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted
// (cidr is marked as used),
// - already processed a Node successfully and allocated a CIDR for it
// (cidr is marked as used),
// - already processed a Node but saw a "timeout" response, and the
//   request eventually got through; in this case we haven't released
//   the allocated CIDR (cidr is still marked as used).
// There's a possible error here:
// - NC sees a new Node and assigns a CIDR X to it,
// - Update Node call fails with a timeout,
// - Node is updated by some other component, NC sees an update and
// assigns CIDR Y to the Node,
// - Both CIDR X and CIDR Y are marked as used in the local cache,
// even though Node sees only CIDR Y
// The problem here is that in the in-memory cache we see CIDR X as marked,
// which prevents it from being assigned to any new node. The cluster
// state is correct.
// Restart of NC fixes the issue.
if node.Spec.PodCIDR == "" {
nodeCopy, err := api.Scheme.Copy(node)
if err != nil {
utilruntime.HandleError(err)
return
}
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*v1.Node)); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
}
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(prevNode, node)
}
},
DeleteFunc: func(originalObj interface{}) {
obj, err := api.Scheme.DeepCopy(originalObj)
if err != nil {
utilruntime.HandleError(err)
return
}
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here and we need to handle that correctly. #34692
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(node, nil)
}
if err := nc.cidrAllocator.ReleaseCIDR(node); err != nil {
glog.Errorf("Error releasing CIDR: %v", err)
}
},
}
} else {
nodeEventHandlerFuncs = cache.ResourceEventHandlerFuncs{
AddFunc: func(originalObj interface{}) {
obj, err := api.Scheme.DeepCopy(originalObj)
if err != nil {
utilruntime.HandleError(err)
return
}
node := obj.(*v1.Node)
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(nil, node)
}
},
UpdateFunc: func(oldNode, newNode interface{}) {
node := newNode.(*v1.Node)
prevNode := oldNode.(*v1.Node)
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(prevNode, node)
}
},
DeleteFunc: func(originalObj interface{}) {
obj, err := api.Scheme.DeepCopy(originalObj)
if err != nil {
utilruntime.HandleError(err)
return
}
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here and we need to handle that correctly. #34692
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(node, nil)
}
},
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nc.onNodeAdd,
UpdateFunc: nc.onNodeUpdate,
DeleteFunc: nc.onNodeDelete,
})
}
if nc.runTaintManager {
nc.taintManager = NewNoExecuteTaintManager(kubeClient)
}
nodeInformer.Informer().AddEventHandler(nodeEventHandlerFuncs)
nc.nodeLister = nodeInformer.Lister()
nc.nodeInformerSynced = nodeInformer.Informer().HasSynced
@ -546,6 +433,90 @@ func (nc *NodeController) doTaintingPass() {
}
}
func (nc *NodeController) onNodeAdd(originalObj interface{}) {
obj, err := api.Scheme.DeepCopy(originalObj)
if err != nil {
utilruntime.HandleError(err)
return
}
node := obj.(*v1.Node)
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(nil, node)
}
}
func (nc *NodeController) onNodeUpdate(oldNode, newNode interface{}) {
node := newNode.(*v1.Node)
prevNode := oldNode.(*v1.Node)
// If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted
// (cidr is marked as used),
// - already processed a Node successfully and allocated a CIDR for it
// (cidr is marked as used),
// - already processed a Node but saw a "timeout" response, and the
//   request eventually got through; in this case we haven't released
//   the allocated CIDR (cidr is still marked as used).
// There's a possible error here:
// - NC sees a new Node and assigns a CIDR X to it,
// - Update Node call fails with a timeout,
// - Node is updated by some other component, NC sees an update and
// assigns CIDR Y to the Node,
// - Both CIDR X and CIDR Y are marked as used in the local cache,
//   even though the Node sees only CIDR Y.
// The problem here is that in the in-memory cache we see CIDR X as marked,
// which prevents it from being assigned to any new node. The cluster
// state is correct.
// Restart of NC fixes the issue.
if node.Spec.PodCIDR == "" {
nodeCopy, err := api.Scheme.Copy(node)
if err != nil {
utilruntime.HandleError(err)
return
}
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*v1.Node)); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
}
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(prevNode, node)
}
}
func (nc *NodeController) onNodeDelete(originalObj interface{}) {
obj, err := api.Scheme.DeepCopy(originalObj)
if err != nil {
utilruntime.HandleError(err)
return
}
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here and
// we need to handle that correctly. #34692
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
if nc.taintManager != nil {
nc.taintManager.NodeUpdated(node, nil)
}
if err := nc.cidrAllocator.ReleaseCIDR(node); err != nil {
glog.Errorf("Error releasing CIDR: %v", err)
}
}
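// Illustrative sketch only (not part of this PR): the DeletedFinalStateUnknown
// handling repeated in the delete handlers above could be factored into a
// helper like the one below; the name nodeFromTombstone is hypothetical.
//
//	func nodeFromTombstone(obj interface{}) (*v1.Node, bool) {
//		if node, ok := obj.(*v1.Node); ok {
//			return node, true
//		}
//		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
//		if !ok {
//			return nil, false
//		}
//		node, ok := tombstone.Obj.(*v1.Node)
//		return node, ok
//	}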
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run() {
go func() {

View File

@ -101,6 +101,7 @@ func NewNodeControllerFromClient(
serviceCIDR,
nodeCIDRMaskSize,
allocateNodeCIDRs,
RangeAllocatorType,
useTaints,
useTaints,
)
@ -549,9 +550,22 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
}
for _, item := range table {
nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler,
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController, _ := NewNodeControllerFromClient(
nil,
item.fakeNodeHandler,
evictionTimeout,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealtyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
nil,
nil,
0,
false,
false)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
for _, ds := range item.daemonSets {

View File

@ -0,0 +1,262 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"net"
"sync"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"github.com/golang/glog"
)
// TODO: figure out good settings for these constants.
const (
// cidrUpdateWorkers controls how many NodeSpec updates NC can process concurrently.
cidrUpdateWorkers = 10
cidrUpdateQueueSize = 5000
// podCIDRUpdateRetry controls the number of retries of writing Node.Spec.PodCIDR update.
podCIDRUpdateRetry = 5
)
type rangeAllocator struct {
client clientset.Interface
cidrs *cidrSet
clusterCIDR *net.IPNet
maxCIDRs int
// Channel used to pass Nodes with newly assigned CIDRs to background workers.
// This increases the throughput of CIDR assignment by not blocking on long operations.
nodeCIDRUpdateChannel chan nodeAndCIDR
recorder record.EventRecorder
// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
sync.Mutex
nodesInProcessing sets.String
}
// NewCIDRRangeAllocator returns a CIDRAllocator that allocates CIDRs for nodes.
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
if client != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")})
} else {
glog.Fatalf("kubeClient is nil when starting NodeController")
}
ra := &rangeAllocator{
client: client,
cidrs: newCIDRSet(clusterCIDR, subNetMaskSize),
clusterCIDR: clusterCIDR,
nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
}
if serviceCIDR != nil {
ra.filterOutServiceRange(serviceCIDR)
} else {
glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
}
if nodeList != nil {
for _, node := range nodeList.Items {
if node.Spec.PodCIDR == "" {
glog.Infof("Node %v has no CIDR, ignoring", node.Name)
continue
} else {
glog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
node.Name, node.Spec.PodCIDR)
}
if err := ra.occupyCIDR(&node); err != nil {
// This will happen if:
// 1. We find garbage in the podCIDR field. Retrying is useless.
// 2. CIDR out of range: This means a node CIDR has changed.
// This error will keep crashing the controller-manager.
return nil, err
}
}
}
for i := 0; i < cidrUpdateWorkers; i++ {
go func(stopChan <-chan struct{}) {
for {
select {
case workItem, ok := <-ra.nodeCIDRUpdateChannel:
if !ok {
glog.Warning("NodeCIDRUpdateChannel read returned false.")
return
}
ra.updateCIDRAllocation(workItem)
case <-stopChan:
return
}
}
}(wait.NeverStop)
}
return ra, nil
}
func (r *rangeAllocator) insertNodeToProcessing(nodeName string) bool {
r.Lock()
defer r.Unlock()
if r.nodesInProcessing.Has(nodeName) {
return false
}
r.nodesInProcessing.Insert(nodeName)
return true
}
func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.Lock()
defer r.Unlock()
r.nodesInProcessing.Delete(nodeName)
}
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
if err := r.cidrs.occupy(podCIDR); err != nil {
return fmt.Errorf("failed to mark cidr as occupied: %v", err)
}
return nil
}
// WARNING: If you add any return paths or defer more work from this
// function, you must handle nodesInProcessing correctly.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
if !r.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
if node.Spec.PodCIDR != "" {
return r.occupyCIDR(node)
}
podCIDR, err := r.cidrs.allocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
recordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}
glog.V(10).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
nodeName: node.Name,
cidr: podCIDR,
}
return nil
}
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
}
glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
if err = r.cidrs.release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
}
return err
}
// Marks all CIDRs with subNetMaskSize that belong to serviceCIDR as used,
// so that they won't be assignable.
func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Checks if service CIDR has a nonempty intersection with cluster
// CIDR. It is the case if either clusterCIDR contains serviceCIDR with
// clusterCIDR's Mask applied (this means that clusterCIDR contains
// serviceCIDR) or vice versa (which means that serviceCIDR contains
// clusterCIDR).
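// For example (hypothetical values): with clusterCIDR 10.100.0.0/16 and
// serviceCIDR 10.100.64.0/20, masking the service IP with the cluster
// mask yields 10.100.0.0, which clusterCIDR contains, so the service
// range is occupied below and never handed out as a node PodCIDR.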
if !r.clusterCIDR.Contains(serviceCIDR.IP.Mask(r.clusterCIDR.Mask)) && !serviceCIDR.Contains(r.clusterCIDR.IP.Mask(serviceCIDR.Mask)) {
return
}
if err := r.cidrs.occupy(serviceCIDR); err != nil {
glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
}
}
// Assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)
for rep := 0; rep < podCIDRUpdateRetry; rep++ {
// TODO: change it to using PATCH instead of full Node updates.
node, err = r.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
continue
}
if node.Spec.PodCIDR != "" {
glog.Errorf("Node %v already has allocated CIDR %v. Releasing assigned one if different.", node.Name, node.Spec.PodCIDR)
if node.Spec.PodCIDR != data.cidr.String() {
if err := r.cidrs.release(data.cidr); err != nil {
glog.Errorf("Error when releasing CIDR %v", data.cidr.String())
}
}
return nil
}
node.Spec.PodCIDR = data.cidr.String()
if _, err := r.client.Core().Nodes().Update(node); err != nil {
glog.Errorf("Failed while updating Node.Spec.PodCIDR (%d retries left): %v", podCIDRUpdateRetry-rep-1, err)
} else {
break
}
}
if err != nil {
recordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept the fact that we may leak CIDRs here. This is safer than releasing
// them when we don't know whether the request went through.
// A NodeController restart will return all falsely allocated CIDRs to the pool.
if !apierrors.IsServerTimeout(err) {
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", data.nodeName, err)
if releaseErr := r.cidrs.release(data.cidr); releaseErr != nil {
glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr)
}
}
}
return err
}
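For orientation, a minimal usage sketch of the new allocator (not part of this diff; the fake clientset, the `k8s.io/kubernetes/pkg/controller/node` import path, and the /24 per-node mask size are assumptions, and allocation completes asynchronously via the worker goroutines started above):

```go
package main

import (
	"net"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
	node "k8s.io/kubernetes/pkg/controller/node"
)

func main() {
	// 10.100.0.0/16 pod space, carved into per-node /24 PodCIDRs.
	_, clusterCIDR, _ := net.ParseCIDR("10.100.0.0/16")
	_, serviceCIDR, _ := net.ParseCIDR("10.100.0.0/24") // filtered out of the pool

	alloc, err := node.NewCIDRRangeAllocator(
		fake.NewSimpleClientset(), clusterCIDR, serviceCIDR, 24, nil)
	if err != nil {
		panic(err)
	}

	// Queues the node; a background worker then writes Node.Spec.PodCIDR.
	n := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-0"}}
	if err := alloc.AllocateOrOccupyCIDR(n); err != nil {
		panic(err)
	}
}
```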

View File

@ -168,7 +168,7 @@ func init() {
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "node-controller"},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbac.NewRule("update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
// used for pod eviction
rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),

View File

@ -561,6 +561,7 @@ items:
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- ""

14
vendor/BUILD vendored
View File

@ -8199,6 +8199,18 @@ go_library(
],
)
go_library(
name = "google.golang.org/api/compute/v0.alpha",
srcs = ["google.golang.org/api/compute/v0.alpha/compute-gen.go"],
tags = ["automanaged"],
deps = [
"//vendor:golang.org/x/net/context",
"//vendor:golang.org/x/net/context/ctxhttp",
"//vendor:google.golang.org/api/gensupport",
"//vendor:google.golang.org/api/googleapi",
],
)
go_library(
name = "google.golang.org/api/compute/v1",
srcs = ["google.golang.org/api/compute/v1/compute-gen.go"],
@ -8241,7 +8253,9 @@ go_library(
"google.golang.org/api/gensupport/backoff.go",
"google.golang.org/api/gensupport/buffer.go",
"google.golang.org/api/gensupport/doc.go",
"google.golang.org/api/gensupport/header.go",
"google.golang.org/api/gensupport/json.go",
"google.golang.org/api/gensupport/jsonfloat.go",
"google.golang.org/api/gensupport/media.go",
"google.golang.org/api/gensupport/params.go",
"google.golang.org/api/gensupport/resumable.go",

View File

@ -70,6 +70,7 @@ type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
GoogleClientHeaderElement string // client header fragment, for Google use only
MetricDescriptors *MetricDescriptorsService
@ -85,6 +86,10 @@ func (s *Service) userAgent() string {
return googleapi.UserAgent + " " + s.UserAgent
}
func (s *Service) clientHeader() string {
return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}
func NewMetricDescriptorsService(s *Service) *MetricDescriptorsService {
rs := &MetricDescriptorsService{s: s}
return rs
@ -555,6 +560,22 @@ func (s *Point) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *Point) UnmarshalJSON(data []byte) error {
type noMethod Point
var s1 struct {
DoubleValue *gensupport.JSONFloat64 `json:"doubleValue"`
*noMethod
}
s1.noMethod = (*noMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
if s1.DoubleValue != nil {
s.DoubleValue = (*float64)(s1.DoubleValue)
}
return nil
}
// PointDistribution: Distribution data point value type. When writing
// distribution points, try to be consistent with the boundaries of your
// buckets. If you must modify the bucket boundaries, then do so by
@ -632,6 +653,22 @@ func (s *PointDistributionBucket) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *PointDistributionBucket) UnmarshalJSON(data []byte) error {
type noMethod PointDistributionBucket
var s1 struct {
LowerBound gensupport.JSONFloat64 `json:"lowerBound"`
UpperBound gensupport.JSONFloat64 `json:"upperBound"`
*noMethod
}
s1.noMethod = (*noMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.LowerBound = float64(s1.LowerBound)
s.UpperBound = float64(s1.UpperBound)
return nil
}
// PointDistributionOverflowBucket: The overflow bucket is a special
// bucket that does not have the upperBound field; it includes all of
// the events that are no less than its lower bound.
@ -667,6 +704,20 @@ func (s *PointDistributionOverflowBucket) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *PointDistributionOverflowBucket) UnmarshalJSON(data []byte) error {
type noMethod PointDistributionOverflowBucket
var s1 struct {
LowerBound gensupport.JSONFloat64 `json:"lowerBound"`
*noMethod
}
s1.noMethod = (*noMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.LowerBound = float64(s1.LowerBound)
return nil
}
// PointDistributionUnderflowBucket: The underflow bucket is a special
// bucket that does not have the lowerBound field; it includes all of
// the events that are less than its upper bound.
@ -702,6 +753,20 @@ func (s *PointDistributionUnderflowBucket) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *PointDistributionUnderflowBucket) UnmarshalJSON(data []byte) error {
type noMethod PointDistributionUnderflowBucket
var s1 struct {
UpperBound gensupport.JSONFloat64 `json:"upperBound"`
*noMethod
}
s1.noMethod = (*noMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.UpperBound = float64(s1.UpperBound)
return nil
}
// Timeseries: The monitoring data is organized as metrics and stored as
// data points that are recorded over time. Each data point represents
// information like the CPU utilization of your virtual machine. A
@ -954,6 +1019,7 @@ func (c *MetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, err
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor)
if err != nil {
@ -1088,6 +1154,7 @@ func (c *MetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, err
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors/{metric}")
@ -1265,6 +1332,7 @@ func (c *MetricDescriptorsListCall) doRequest(alt string) (*http.Response, error
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1542,6 +1610,7 @@ func (c *TimeseriesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1772,6 +1841,7 @@ func (c *TimeseriesWriteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.writetimeseriesrequest)
if err != nil {
@ -2012,6 +2082,7 @@ func (c *TimeseriesDescriptorsListCall) doRequest(alt string) (*http.Response, e
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
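The repeated UnmarshalJSON additions above all rely on two tricks: a method-less alias type (`noMethod`) so the inner json.Unmarshal cannot recurse back into the custom method, and a shadow field typed gensupport.JSONFloat64 so float fields tolerate quoted values such as "NaN". A self-contained sketch of the pattern (jsonFloat64 below is a stand-in for the gensupport type, not the real implementation):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math"
)

// jsonFloat64 accepts a JSON number or the quoted special values
// ("NaN", "Infinity", "-Infinity") some Google APIs emit for floats.
type jsonFloat64 float64

func (f *jsonFloat64) UnmarshalJSON(data []byte) error {
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	switch x := v.(type) {
	case float64:
		*f = jsonFloat64(x)
	case string:
		switch x {
		case "NaN":
			*f = jsonFloat64(math.NaN())
		case "Infinity":
			*f = jsonFloat64(math.Inf(1))
		case "-Infinity":
			*f = jsonFloat64(math.Inf(-1))
		default:
			return fmt.Errorf("unsupported string %q", x)
		}
	default:
		return fmt.Errorf("unsupported JSON value %v", v)
	}
	return nil
}

type Point struct {
	DoubleValue *float64 `json:"doubleValue"`
}

func (s *Point) UnmarshalJSON(data []byte) error {
	type noMethod Point // same fields, no methods: the inner Unmarshal cannot recurse
	var s1 struct {
		DoubleValue *jsonFloat64 `json:"doubleValue"` // shadows the real field
		*noMethod
	}
	s1.noMethod = (*noMethod)(s)
	if err := json.Unmarshal(data, &s1); err != nil {
		return err
	}
	if s1.DoubleValue != nil {
		s.DoubleValue = (*float64)(s1.DoubleValue)
	}
	return nil
}

func main() {
	var p Point
	if err := json.Unmarshal([]byte(`{"doubleValue": "NaN"}`), &p); err != nil {
		panic(err)
	}
	fmt.Println(*p.DoubleValue) // NaN
}
```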

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,11 +1,11 @@
{
"kind": "discovery#restDescription",
"etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/cpP4K9eaLrLwMGtsdl5oXjxb8rw\"",
"etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/aTs6tIgXySgjqhtr4EU6PD-kvdQ\"",
"discoveryVersion": "v1",
"id": "container:v1",
"name": "container",
"version": "v1",
"revision": "20160421",
"revision": "20161024",
"title": "Google Container Engine API",
"description": "Builds and manages clusters that run container-based applications, powered by open source Kubernetes technology.",
"ownerDomain": "google.com",
@ -183,7 +183,7 @@
},
"nodePools": {
"type": "array",
"description": "The node pools associated with this cluster. When creating a new cluster, only a single node pool should be specified. This field should not be set if \"node_config\" or \"initial_node_count\" are specified.",
"description": "The node pools associated with this cluster. This field should not be set if \"node_config\" or \"initial_node_count\" are specified.",
"items": {
"$ref": "NodePool"
}
@ -195,6 +195,10 @@
"type": "string"
}
},
"enableKubernetesAlpha": {
"type": "boolean",
"description": "Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation."
},
"selfLink": {
"type": "string",
"description": "[Output only] Server-defined URL for the resource."
@ -259,6 +263,10 @@
"type": "integer",
"description": "[Output only] The number of nodes currently in the cluster.",
"format": "int32"
},
"expireTime": {
"type": "string",
"description": "[Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format."
}
}
},
@ -283,12 +291,43 @@
"type": "string"
}
},
"serviceAccount": {
"type": "string",
"description": "The Google Cloud Platform Service Account to be used by the node VMs. If no Service Account is specified, the \"default\" service account is used."
},
"metadata": {
"type": "object",
"description": "The metadata key/value pairs assigned to instances in the cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes in length. These are reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project or be one of the four reserved keys: \"instance-template\", \"kube-env\", \"startup-script\", and \"user-data\" Values are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on them is that each value's size must be less than or equal to 32 KB. The total size of all keys and values must be less than 512 KB.",
"additionalProperties": {
"type": "string"
}
},
"imageType": {
"type": "string",
"description": "The image type to use for this node. Note that for a given image type, the latest version of it will be used."
},
"labels": {
"type": "object",
"description": "The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: http://kubernetes.io/v1.1/docs/user-guide/labels.html",
"additionalProperties": {
"type": "string"
}
},
"localSsdCount": {
"type": "integer",
"description": "The number of local SSD disks to be attached to the node. The limit for this value is dependant upon the maximum number of disks available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits for more information.",
"format": "int32"
},
"tags": {
"type": "array",
"description": "The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during cluster or node pool creation. Each tag within the list must comply with RFC1035.",
"items": {
"type": "string"
}
},
"preemptible": {
"type": "boolean",
"description": "Whether the nodes are created as preemptible VM instances. See: https://cloud.google.com/compute/docs/instances/preemptible for more inforamtion about preemptible VM instances."
}
}
},
@ -376,11 +415,11 @@
},
"selfLink": {
"type": "string",
"description": "Server-defined URL for the resource."
"description": "[Output only] Server-defined URL for the resource."
},
"version": {
"type": "string",
"description": "The version of the Kubernetes of this node."
"description": "[Output only] The version of the Kubernetes of this node."
},
"instanceGroupUrls": {
"type": "array",
@ -391,7 +430,7 @@
},
"status": {
"type": "string",
"description": "The status of the nodes in this pool instance.",
"description": "[Output only] The status of the nodes in this pool instance.",
"enum": [
"STATUS_UNSPECIFIED",
"PROVISIONING",
@ -405,6 +444,65 @@
"statusMessage": {
"type": "string",
"description": "[Output only] Additional information about the current status of this node pool instance, if available."
},
"autoscaling": {
"$ref": "NodePoolAutoscaling",
"description": "Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present."
},
"management": {
"$ref": "NodeManagement",
"description": "NodeManagement configuration for this NodePool."
}
}
},
"NodePoolAutoscaling": {
"id": "NodePoolAutoscaling",
"type": "object",
"description": "NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.",
"properties": {
"enabled": {
"type": "boolean",
"description": "Is autoscaling enabled for this node pool."
},
"minNodeCount": {
"type": "integer",
"description": "Minimum number of nodes in the NodePool. Must be \u003e= 1 and \u003c= max_node_count.",
"format": "int32"
},
"maxNodeCount": {
"type": "integer",
"description": "Maximum number of nodes in the NodePool. Must be \u003e= min_node_count. There has to enough quota to scale up the cluster.",
"format": "int32"
}
}
},
"NodeManagement": {
"id": "NodeManagement",
"type": "object",
"description": "NodeManagement defines the set of node management services turned on for the node pool.",
"properties": {
"autoUpgrade": {
"type": "boolean",
"description": "Whether the nodes will be automatically upgraded."
},
"upgradeOptions": {
"$ref": "AutoUpgradeOptions",
"description": "Specifies the Auto Upgrade knobs for the node pool."
}
}
},
"AutoUpgradeOptions": {
"id": "AutoUpgradeOptions",
"type": "object",
"description": "AutoUpgradeOptions defines the set of options for the user to control how the Auto Upgrades will proceed.",
"properties": {
"autoUpgradeStartTime": {
"type": "string",
"description": "[Output only] This field is set when upgrades are about to commence with the approximate start time for the upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format."
},
"description": {
"type": "string",
"description": "[Output only] This field is set when upgrades are about to commence with the description of the upgrade."
}
}
},
@ -444,7 +542,8 @@
"REPAIR_CLUSTER",
"UPDATE_CLUSTER",
"CREATE_NODE_POOL",
"DELETE_NODE_POOL"
"DELETE_NODE_POOL",
"SET_NODE_POOL_MANAGEMENT"
]
},
"status": {
@ -454,7 +553,8 @@
"STATUS_UNSPECIFIED",
"PENDING",
"RUNNING",
"DONE"
"DONE",
"ABORTING"
]
},
"detail": {
@ -505,7 +605,22 @@
},
"desiredNodePoolId": {
"type": "string",
"description": "The node pool to be upgraded. This field is mandatory if the \"desired_node_version\" or \"desired_image_family\" is specified and there is more than one node pool on the cluster."
"description": "The node pool to be upgraded. This field is mandatory if \"desired_node_version\", \"desired_image_family\" or \"desired_node_pool_autoscaling\" is specified and there is more than one node pool on the cluster."
},
"desiredImageType": {
"type": "string",
"description": "The desired image type for the node pool. NOTE: Set the \"desired_node_pool\" field as well."
},
"desiredNodePoolAutoscaling": {
"$ref": "NodePoolAutoscaling",
"description": "Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool."
},
"desiredLocations": {
"type": "array",
"description": "The desired list of Google Compute Engine [locations](/compute/docs/zones#available) in which the cluster's nodes should be located. Changing the locations a cluster is in will result in nodes being either created or removed from the cluster, depending on whether locations are being added or removed. This list must always include the cluster's primary zone.",
"items": {
"type": "string"
}
},
"desiredMasterVersion": {
"type": "string",
@ -534,6 +649,16 @@
}
}
},
"CancelOperationRequest": {
"id": "CancelOperationRequest",
"type": "object",
"description": "CancelOperationRequest cancels a single operation."
},
"Empty": {
"id": "Empty",
"type": "object",
"description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`."
},
"ServerConfig": {
"id": "ServerConfig",
"type": "object",
@ -550,13 +675,20 @@
"type": "string"
}
},
"defaultImageFamily": {
"defaultImageType": {
"type": "string",
"description": "Default image family."
"description": "Default image type."
},
"validImageFamilies": {
"validImageTypes": {
"type": "array",
"description": "List of valid image families.",
"description": "List of valid image types.",
"items": {
"type": "string"
}
},
"validMasterVersions": {
"type": "array",
"description": "List of valid master versions.",
"items": {
"type": "string"
}
@ -587,6 +719,22 @@
"description": "The node pool to create."
}
}
},
"RollbackNodePoolUpgradeRequest": {
"id": "RollbackNodePoolUpgradeRequest",
"type": "object",
"description": "RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed NodePool upgrade. This will be an no-op if the last upgrade successfully completed."
},
"SetNodePoolManagementRequest": {
"id": "SetNodePoolManagementRequest",
"type": "object",
"description": "SetNodePoolManagementRequest sets the node management properties of a node pool.",
"properties": {
"management": {
"$ref": "NodeManagement",
"description": "NodeManagement configuration for the node pool."
}
}
}
},
"resources": {
@ -973,6 +1121,100 @@
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
"rollback": {
"id": "container.projects.zones.clusters.nodePools.rollback",
"path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback",
"httpMethod": "POST",
"description": "Roll back the previously Aborted or Failed NodePool upgrade. This will be an no-op if the last upgrade successfully completed.",
"parameters": {
"projectId": {
"type": "string",
"description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).",
"required": true,
"location": "path"
},
"zone": {
"type": "string",
"description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.",
"required": true,
"location": "path"
},
"clusterId": {
"type": "string",
"description": "The name of the cluster to rollback.",
"required": true,
"location": "path"
},
"nodePoolId": {
"type": "string",
"description": "The name of the node pool to rollback.",
"required": true,
"location": "path"
}
},
"parameterOrder": [
"projectId",
"zone",
"clusterId",
"nodePoolId"
],
"request": {
"$ref": "RollbackNodePoolUpgradeRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
"setManagement": {
"id": "container.projects.zones.clusters.nodePools.setManagement",
"path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement",
"httpMethod": "POST",
"description": "Sets the NodeManagement options for a node pool.",
"parameters": {
"projectId": {
"type": "string",
"description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).",
"required": true,
"location": "path"
},
"zone": {
"type": "string",
"description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.",
"required": true,
"location": "path"
},
"clusterId": {
"type": "string",
"description": "The name of the cluster to update.",
"required": true,
"location": "path"
},
"nodePoolId": {
"type": "string",
"description": "The name of the node pool to update.",
"required": true,
"location": "path"
}
},
"parameterOrder": [
"projectId",
"zone",
"clusterId",
"nodePoolId"
],
"request": {
"$ref": "SetNodePoolManagementRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
}
@ -1046,6 +1288,46 @@
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
"cancel": {
"id": "container.projects.zones.operations.cancel",
"path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel",
"httpMethod": "POST",
"description": "Cancels the specified operation.",
"parameters": {
"projectId": {
"type": "string",
"description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).",
"required": true,
"location": "path"
},
"zone": {
"type": "string",
"description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the operation resides.",
"required": true,
"location": "path"
},
"operationId": {
"type": "string",
"description": "The server-assigned `name` of the operation.",
"required": true,
"location": "path"
}
},
"parameterOrder": [
"projectId",
"zone",
"operationId"
],
"request": {
"$ref": "CancelOperationRequest"
},
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
}
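A hedged sketch (not in this diff) of driving the newly added setManagement method through the regenerated container/v1 client; the project, zone, cluster, and node-pool values are placeholders, and the SetManagement call signature is inferred from the Rollback call generated further down:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	container "google.golang.org/api/container/v1"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, container.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := container.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Turn on node auto-upgrade for one node pool.
	req := &container.SetNodePoolManagementRequest{
		Management: &container.NodeManagement{AutoUpgrade: true},
	}
	op, err := svc.Projects.Zones.Clusters.NodePools.SetManagement(
		"my-project", "us-central1-b", "my-cluster", "default-pool", req).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation status: %s", op.Status)
}
```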

View File

@ -64,6 +64,7 @@ type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
GoogleClientHeaderElement string // client header fragment, for Google use only
Projects *ProjectsService
}
@ -75,6 +76,10 @@ func (s *Service) userAgent() string {
return googleapi.UserAgent + " " + s.UserAgent
}
func (s *Service) clientHeader() string {
return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}
func NewProjectsService(s *Service) *ProjectsService {
rs := &ProjectsService{s: s}
rs.Zones = NewProjectsZonesService(s)
@ -171,6 +176,49 @@ func (s *AddonsConfig) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AutoUpgradeOptions: AutoUpgradeOptions defines the set of options for
// the user to control how the Auto Upgrades will proceed.
type AutoUpgradeOptions struct {
// AutoUpgradeStartTime: [Output only] This field is set when upgrades
// are about to commence with the approximate start time for the
// upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text
// format.
AutoUpgradeStartTime string `json:"autoUpgradeStartTime,omitempty"`
// Description: [Output only] This field is set when upgrades are about
// to commence with the description of the upgrade.
Description string `json:"description,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AutoUpgradeStartTime") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AutoUpgradeStartTime") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AutoUpgradeOptions) MarshalJSON() ([]byte, error) {
type noMethod AutoUpgradeOptions
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CancelOperationRequest: CancelOperationRequest cancels a single
// operation.
type CancelOperationRequest struct {
}
// Cluster: A Google Container Engine cluster.
type Cluster struct {
// AddonsConfig: Configurations for the various addons available to run
@ -205,12 +253,25 @@ type Cluster struct {
// Description: An optional description of this cluster.
Description string `json:"description,omitempty"`
// EnableKubernetesAlpha: Kubernetes alpha features are enabled on this
// cluster. This includes alpha API groups (e.g. v1alpha1) and features
// that may not be production ready in the kubernetes version of the
// master and nodes. The cluster has no SLA for uptime and master/node
// upgrades are disabled. Alpha enabled clusters are automatically
// deleted thirty days after creation.
EnableKubernetesAlpha bool `json:"enableKubernetesAlpha,omitempty"`
// Endpoint: [Output only] The IP address of this cluster's master
// endpoint. The endpoint can be accessed from the internet at
// `https://username:password@endpoint/`. See the `masterAuth` property
// of this resource for username and password information.
Endpoint string `json:"endpoint,omitempty"`
// ExpireTime: [Output only] The time the cluster will be automatically
// deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text
// format.
ExpireTime string `json:"expireTime,omitempty"`
// InitialClusterVersion: [Output only] The software version of the
// master endpoint and kubelets used in the cluster when it was first
// created. The version can be upgraded over time.
@ -280,9 +341,8 @@ type Cluster struct {
// `container_ipv4_cidr` range.
NodeIpv4CidrSize int64 `json:"nodeIpv4CidrSize,omitempty"`
// NodePools: The node pools associated with this cluster. When creating
// a new cluster, only a single node pool should be specified. This
// field should not be set if "node_config" or "initial_node_count" are
// NodePools: The node pools associated with this cluster. This field
// should not be set if "node_config" or "initial_node_count" are
// specified.
NodePools []*NodePool `json:"nodePools,omitempty"`
@ -355,6 +415,18 @@ type ClusterUpdate struct {
// to run in the cluster.
DesiredAddonsConfig *AddonsConfig `json:"desiredAddonsConfig,omitempty"`
// DesiredImageType: The desired image type for the node pool. NOTE: Set
// the "desired_node_pool" field as well.
DesiredImageType string `json:"desiredImageType,omitempty"`
// DesiredLocations: The desired list of Google Compute Engine
// [locations](/compute/docs/zones#available) in which the cluster's
// nodes should be located. Changing the locations a cluster is in will
// result in nodes being either created or removed from the cluster,
// depending on whether locations are being added or removed. This list
// must always include the cluster's primary zone.
DesiredLocations []string `json:"desiredLocations,omitempty"`
// DesiredMasterVersion: The Kubernetes version to change the master to.
// The only valid value is the latest supported version. Use "-" to have
// the server automatically select the latest version.
@ -366,9 +438,16 @@ type ClusterUpdate struct {
// "none" - no metrics will be exported from the cluster
DesiredMonitoringService string `json:"desiredMonitoringService,omitempty"`
// DesiredNodePoolAutoscaling: Autoscaler configuration for the node
// pool specified in desired_node_pool_id. If there is only one pool in
// the cluster and desired_node_pool_id is not provided then the change
// applies to that single node pool.
DesiredNodePoolAutoscaling *NodePoolAutoscaling `json:"desiredNodePoolAutoscaling,omitempty"`
// DesiredNodePoolId: The node pool to be upgraded. This field is
// mandatory if the "desired_node_version" or "desired_image_family" is
// specified and there is more than one node pool on the cluster.
// mandatory if "desired_node_version", "desired_image_family" or
// "desired_node_pool_autoscaling" is specified and there is more than
// one node pool on the cluster.
DesiredNodePoolId string `json:"desiredNodePoolId,omitempty"`
// DesiredNodeVersion: The Kubernetes version to change the nodes to
@ -458,6 +537,18 @@ func (s *CreateNodePoolRequest) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Empty: A generic empty message that you can re-use to avoid defining
// duplicated empty messages in your APIs. A typical example is to use
// it as the request or the response type of an API method. For
// instance: service Foo { rpc Bar(google.protobuf.Empty) returns
// (google.protobuf.Empty); } The JSON representation for `Empty` is
// empty JSON object `{}`.
type Empty struct {
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
}
// HorizontalPodAutoscaling: Configuration options for the horizontal
// pod autoscaling feature, which increases or decreases the number of
// replica pods a replication controller has based on the resource usage
@ -689,6 +780,26 @@ type NodeConfig struct {
// disk size is 100GB.
DiskSizeGb int64 `json:"diskSizeGb,omitempty"`
// ImageType: The image type to use for this node. Note that for a given
// image type, the latest version of it will be used.
ImageType string `json:"imageType,omitempty"`
// Labels: The map of Kubernetes labels (key/value pairs) to be applied
// to each node. These will be added in addition to any default label(s)
// that Kubernetes may apply to the node. In case of conflict in label
// keys, the applied set may differ depending on the Kubernetes version
// -- it's best to assume the behavior is undefined and conflicts should
// be avoided. For more information, including usage and the valid
// values, see: http://kubernetes.io/v1.1/docs/user-guide/labels.html
Labels map[string]string `json:"labels,omitempty"`
// LocalSsdCount: The number of local SSD disks to be attached to the
// node. The limit for this value is dependent upon the maximum number
// of disks available on a machine per zone. See:
// https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits for more
// information.
LocalSsdCount int64 `json:"localSsdCount,omitempty"`
// MachineType: The name of a Google Compute Engine [machine
// type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If
// unspecified, the default machine type is `n1-standard-1`.
@ -719,6 +830,23 @@ type NodeConfig struct {
// case their required scopes will be added.
OauthScopes []string `json:"oauthScopes,omitempty"`
// Preemptible: Whether the nodes are created as preemptible VM
// instances. See:
// https://cloud.google.com/compute/docs/instances/preemptible for more
// information about preemptible VM instances.
Preemptible bool `json:"preemptible,omitempty"`
// ServiceAccount: The Google Cloud Platform Service Account to be used
// by the node VMs. If no Service Account is specified, the "default"
// service account is used.
ServiceAccount string `json:"serviceAccount,omitempty"`
// Tags: The list of instance tags applied to all nodes. Tags are used
// to identify valid sources or targets for network firewalls and are
// specified by the client during cluster or node pool creation. Each
// tag within the list must comply with RFC1035.
Tags []string `json:"tags,omitempty"`
// ForceSendFields is a list of field names (e.g. "DiskSizeGb") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
@ -742,6 +870,38 @@ func (s *NodeConfig) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// NodeManagement: NodeManagement defines the set of node management
// services turned on for the node pool.
type NodeManagement struct {
// AutoUpgrade: Whether the nodes will be automatically upgraded.
AutoUpgrade bool `json:"autoUpgrade,omitempty"`
// UpgradeOptions: Specifies the Auto Upgrade knobs for the node pool.
UpgradeOptions *AutoUpgradeOptions `json:"upgradeOptions,omitempty"`
// ForceSendFields is a list of field names (e.g. "AutoUpgrade") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AutoUpgrade") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *NodeManagement) MarshalJSON() ([]byte, error) {
type noMethod NodeManagement
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// NodePool: NodePool contains the name and configuration for a
// cluster's node pool. Node pools are a set of nodes (i.e. VM's), with
// a common configuration and specification, under the control of the
@ -749,6 +909,10 @@ func (s *NodeConfig) MarshalJSON() ([]byte, error) {
// them, which may be used to reference them during pod scheduling. They
// may also be resized up or down, to accommodate the workload.
type NodePool struct {
// Autoscaling: Autoscaler configuration for this NodePool. Autoscaler
// is enabled only if a valid configuration is present.
Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"`
// Config: The node configuration of the pool.
Config *NodeConfig `json:"config,omitempty"`
@ -763,13 +927,16 @@ type NodePool struct {
// pool.
InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"`
// Management: NodeManagement configuration for this NodePool.
Management *NodeManagement `json:"management,omitempty"`
// Name: The name of the node pool.
Name string `json:"name,omitempty"`
// SelfLink: Server-defined URL for the resource.
// SelfLink: [Output only] Server-defined URL for the resource.
SelfLink string `json:"selfLink,omitempty"`
// Status: The status of the nodes in this pool instance.
// Status: [Output only] The status of the nodes in this pool instance.
//
// Possible values:
// "STATUS_UNSPECIFIED"
@ -785,14 +952,14 @@ type NodePool struct {
// status of this node pool instance, if available.
StatusMessage string `json:"statusMessage,omitempty"`
// Version: The version of the Kubernetes of this node.
// Version: [Output only] The version of the Kubernetes of this node.
Version string `json:"version,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Config") to
// ForceSendFields is a list of field names (e.g. "Autoscaling") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
@ -800,10 +967,10 @@ type NodePool struct {
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Config") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// NullFields is a list of field names (e.g. "Autoscaling") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
@ -815,6 +982,44 @@ func (s *NodePool) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// NodePoolAutoscaling: NodePoolAutoscaling contains information
// required by cluster autoscaler to adjust the size of the node pool to
// the current cluster usage.
type NodePoolAutoscaling struct {
// Enabled: Is autoscaling enabled for this node pool.
Enabled bool `json:"enabled,omitempty"`
// MaxNodeCount: Maximum number of nodes in the NodePool. Must be >=
// min_node_count. There has to be enough quota to scale up the cluster.
MaxNodeCount int64 `json:"maxNodeCount,omitempty"`
// MinNodeCount: Minimum number of nodes in the NodePool. Must be >= 1
// and <= max_node_count.
MinNodeCount int64 `json:"minNodeCount,omitempty"`
// ForceSendFields is a list of field names (e.g. "Enabled") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Enabled") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *NodePoolAutoscaling) MarshalJSON() ([]byte, error) {
type noMethod NodePoolAutoscaling
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Operation: This operation resource represents operations that may
// have happened or are happening on the cluster. All fields are output
// only.
@ -837,6 +1042,7 @@ type Operation struct {
// "UPDATE_CLUSTER"
// "CREATE_NODE_POOL"
// "DELETE_NODE_POOL"
// "SET_NODE_POOL_MANAGEMENT"
OperationType string `json:"operationType,omitempty"`
// SelfLink: Server-defined URL for the resource.
@ -849,6 +1055,7 @@ type Operation struct {
// "PENDING"
// "RUNNING"
// "DONE"
// "ABORTING"
Status string `json:"status,omitempty"`
// StatusMessage: If an error has occurred, a textual description of the
@ -890,17 +1097,26 @@ func (s *Operation) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// RollbackNodePoolUpgradeRequest: RollbackNodePoolUpgradeRequest
// rolls back the previously Aborted or Failed NodePool upgrade. This
// will be a no-op if the last upgrade successfully completed.
type RollbackNodePoolUpgradeRequest struct {
}
// ServerConfig: Container Engine service configuration.
type ServerConfig struct {
// DefaultClusterVersion: Version of Kubernetes the service deploys by
// default.
DefaultClusterVersion string `json:"defaultClusterVersion,omitempty"`
// DefaultImageFamily: Default image family.
DefaultImageFamily string `json:"defaultImageFamily,omitempty"`
// DefaultImageType: Default image type.
DefaultImageType string `json:"defaultImageType,omitempty"`
// ValidImageFamilies: List of valid image families.
ValidImageFamilies []string `json:"validImageFamilies,omitempty"`
// ValidImageTypes: List of valid image types.
ValidImageTypes []string `json:"validImageTypes,omitempty"`
// ValidMasterVersions: List of valid master versions.
ValidMasterVersions []string `json:"validMasterVersions,omitempty"`
// ValidNodeVersions: List of valid node upgrade target versions.
ValidNodeVersions []string `json:"validNodeVersions,omitempty"`
@ -934,6 +1150,35 @@ func (s *ServerConfig) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SetNodePoolManagementRequest: SetNodePoolManagementRequest sets the
// node management properties of a node pool.
type SetNodePoolManagementRequest struct {
// Management: NodeManagement configuration for the node pool.
Management *NodeManagement `json:"management,omitempty"`
// ForceSendFields is a list of field names (e.g. "Management") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Management") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SetNodePoolManagementRequest) MarshalJSON() ([]byte, error) {
type noMethod SetNodePoolManagementRequest
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// UpdateClusterRequest: UpdateClusterRequest updates the settings of a
// cluster.
type UpdateClusterRequest struct {
@ -1025,6 +1270,7 @@ func (c *ProjectsZonesGetServerconfigCall) doRequest(alt string) (*http.Response
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1171,6 +1417,7 @@ func (c *ProjectsZonesClustersCreateCall) doRequest(alt string) (*http.Response,
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest)
if err != nil {
@ -1319,6 +1566,7 @@ func (c *ProjectsZonesClustersDeleteCall) doRequest(alt string) (*http.Response,
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}")
@ -1473,6 +1721,7 @@ func (c *ProjectsZonesClustersGetCall) doRequest(alt string) (*http.Response, er
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1629,6 +1878,7 @@ func (c *ProjectsZonesClustersListCall) doRequest(alt string) (*http.Response, e
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1769,6 +2019,7 @@ func (c *ProjectsZonesClustersUpdateCall) doRequest(alt string) (*http.Response,
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateclusterrequest)
if err != nil {
@ -1922,6 +2173,7 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) doRequest(alt string) (*http.
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createnodepoolrequest)
if err != nil {
@ -2075,6 +2327,7 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) doRequest(alt string) (*http.
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}")
@ -2239,6 +2492,7 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) doRequest(alt string) (*http.Res
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2404,6 +2658,7 @@ func (c *ProjectsZonesClustersNodePoolsListCall) doRequest(alt string) (*http.Re
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2498,6 +2753,490 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Do(opts ...googleapi.CallOption
}
// method id "container.projects.zones.clusters.nodePools.rollback":
type ProjectsZonesClustersNodePoolsRollbackCall struct {
s *Service
projectId string
zone string
clusterId string
nodePoolId string
rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Rollback: Roll back the previously Aborted or Failed NodePool
// upgrade. This will be a no-op if the last upgrade successfully
// completed.
func (r *ProjectsZonesClustersNodePoolsService) Rollback(projectId string, zone string, clusterId string, nodePoolId string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsZonesClustersNodePoolsRollbackCall {
c := &ProjectsZonesClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.projectId = projectId
c.zone = zone
c.clusterId = clusterId
c.nodePoolId = nodePoolId
c.rollbacknodepoolupgraderequest = rollbacknodepoolupgraderequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsZonesClustersNodePoolsRollbackCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersNodePoolsRollbackCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsZonesClustersNodePoolsRollbackCall) Context(ctx context.Context) *ProjectsZonesClustersNodePoolsRollbackCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsZonesClustersNodePoolsRollbackCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsZonesClustersNodePoolsRollbackCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbacknodepoolupgraderequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"projectId": c.projectId,
"zone": c.zone,
"clusterId": c.clusterId,
"nodePoolId": c.nodePoolId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "container.projects.zones.clusters.nodePools.rollback" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Roll back the previously Aborted or Failed NodePool upgrade. This will be an no-op if the last upgrade successfully completed.",
// "httpMethod": "POST",
// "id": "container.projects.zones.clusters.nodePools.rollback",
// "parameterOrder": [
// "projectId",
// "zone",
// "clusterId",
// "nodePoolId"
// ],
// "parameters": {
// "clusterId": {
// "description": "The name of the cluster to rollback.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "nodePoolId": {
// "description": "The name of the node pool to rollback.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projectId": {
// "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "zone": {
// "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback",
// "request": {
// "$ref": "RollbackNodePoolUpgradeRequest"
// },
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
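
For orientation, here is a minimal sketch of how client code drives one of these generated call builders; it is not part of this diff. The project, zone, cluster, and node-pool names are placeholders, and the credential setup via golang.org/x/oauth2/google is a typical choice rather than anything this change prescribes:

```go
// Hypothetical usage sketch: roll back an aborted node-pool upgrade.
// All identifiers ("my-project", "us-central1-a", ...) are placeholders.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	container "google.golang.org/api/container/v1"
)

func main() {
	ctx := context.Background()
	// DefaultClient picks up Application Default Credentials.
	client, err := google.DefaultClient(ctx, container.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := container.New(client)
	if err != nil {
		log.Fatal(err)
	}
	op, err := svc.Projects.Zones.Clusters.NodePools.
		Rollback("my-project", "us-central1-a", "my-cluster", "my-pool",
			&container.RollbackNodePoolUpgradeRequest{}).
		Context(ctx).
		Fields("name", "status"). // partial response, per the Fields doc above
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}
```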
// method id "container.projects.zones.clusters.nodePools.setManagement":
type ProjectsZonesClustersNodePoolsSetManagementCall struct {
s *Service
projectId string
zone string
clusterId string
nodePoolId string
setnodepoolmanagementrequest *SetNodePoolManagementRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// SetManagement: Sets the NodeManagement options for a node pool.
func (r *ProjectsZonesClustersNodePoolsService) SetManagement(projectId string, zone string, clusterId string, nodePoolId string, setnodepoolmanagementrequest *SetNodePoolManagementRequest) *ProjectsZonesClustersNodePoolsSetManagementCall {
c := &ProjectsZonesClustersNodePoolsSetManagementCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.projectId = projectId
c.zone = zone
c.clusterId = clusterId
c.nodePoolId = nodePoolId
c.setnodepoolmanagementrequest = setnodepoolmanagementrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersNodePoolsSetManagementCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Context(ctx context.Context) *ProjectsZonesClustersNodePoolsSetManagementCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsZonesClustersNodePoolsSetManagementCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.setnodepoolmanagementrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"projectId": c.projectId,
"zone": c.zone,
"clusterId": c.clusterId,
"nodePoolId": c.nodePoolId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "container.projects.zones.clusters.nodePools.setManagement" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Sets the NodeManagement options for a node pool.",
// "httpMethod": "POST",
// "id": "container.projects.zones.clusters.nodePools.setManagement",
// "parameterOrder": [
// "projectId",
// "zone",
// "clusterId",
// "nodePoolId"
// ],
// "parameters": {
// "clusterId": {
// "description": "The name of the cluster to update.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "nodePoolId": {
// "description": "The name of the node pool to update.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projectId": {
// "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "zone": {
// "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement",
// "request": {
// "$ref": "SetNodePoolManagementRequest"
// },
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
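
The Do contract described above (every non-2xx status surfacing as a *googleapi.Error with the response headers attached) suggests error handling along the following lines. This is a sketch only: it reuses a svc value built as in the previous example, and AutoUpgrade is chosen as an illustrative NodeManagement option:

```go
// Hypothetical sketch, not part of this diff: enable node auto-upgrade
// via setManagement and unpack the error contract described above.
// Resource names are placeholders; svc is built as in the earlier sketch.
package example

import (
	"fmt"
	"log"

	container "google.golang.org/api/container/v1"
	"google.golang.org/api/googleapi"
)

func enableAutoUpgrade(svc *container.Service) error {
	req := &container.SetNodePoolManagementRequest{
		Management: &container.NodeManagement{AutoUpgrade: true},
	}
	op, err := svc.Projects.Zones.Clusters.NodePools.
		SetManagement("my-project", "us-central1-a", "my-cluster", "my-pool", req).
		Do()
	if err != nil {
		// Non-2xx statuses arrive as *googleapi.Error, headers included.
		if gerr, ok := err.(*googleapi.Error); ok {
			return fmt.Errorf("API error %d: %s", gerr.Code, gerr.Message)
		}
		return err
	}
	log.Println("operation started:", op.Name)
	return nil
}
```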
// method id "container.projects.zones.operations.cancel":
type ProjectsZonesOperationsCancelCall struct {
s *Service
projectId string
zone string
operationId string
canceloperationrequest *CancelOperationRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Cancel: Cancels the specified operation.
func (r *ProjectsZonesOperationsService) Cancel(projectId string, zone string, operationId string, canceloperationrequest *CancelOperationRequest) *ProjectsZonesOperationsCancelCall {
c := &ProjectsZonesOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.projectId = projectId
c.zone = zone
c.operationId = operationId
c.canceloperationrequest = canceloperationrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsZonesOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsCancelCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsZonesOperationsCancelCall) Context(ctx context.Context) *ProjectsZonesOperationsCancelCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsZonesOperationsCancelCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsZonesOperationsCancelCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"projectId": c.projectId,
"zone": c.zone,
"operationId": c.operationId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "container.projects.zones.operations.cancel" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsZonesOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Cancels the specified operation.",
// "httpMethod": "POST",
// "id": "container.projects.zones.operations.cancel",
// "parameterOrder": [
// "projectId",
// "zone",
// "operationId"
// ],
// "parameters": {
// "operationId": {
// "description": "The server-assigned `name` of the operation.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projectId": {
// "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "zone": {
// "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the operation resides.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel",
// "request": {
// "$ref": "CancelOperationRequest"
// },
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
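
As a hedged sketch of how the cancel endpoint composes with operations.get (the method that follows in this file), again assuming the same placeholder names:

```go
// Hypothetical sketch, not part of this diff: cancel an in-flight
// operation, then read back its status via operations.get.
package example

import (
	"fmt"

	container "google.golang.org/api/container/v1"
)

func cancelAndCheck(svc *container.Service, operationID string) error {
	_, err := svc.Projects.Zones.Operations.
		Cancel("my-project", "us-central1-a", operationID,
			&container.CancelOperationRequest{}).
		Do()
	if err != nil {
		return err
	}
	// Cancel returns Empty, so poll operations.get for the outcome.
	op, err := svc.Projects.Zones.Operations.
		Get("my-project", "us-central1-a", operationID).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("status after cancel:", op.Status)
	return nil
}
```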
// method id "container.projects.zones.operations.get":
type ProjectsZonesOperationsGetCall struct {
@ -2561,6 +3300,7 @@ func (c *ProjectsZonesOperationsGetCall) doRequest(alt string) (*http.Response,
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2717,6 +3457,7 @@ func (c *ProjectsZonesOperationsListCall) doRequest(alt string) (*http.Response,
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}

vendor/google.golang.org/api/dns/v1/dns-gen.go generated vendored
View File

@ -76,6 +76,7 @@ type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
GoogleClientHeaderElement string // client header fragment, for Google use only
Changes *ChangesService
@ -93,6 +94,10 @@ func (s *Service) userAgent() string {
return googleapi.UserAgent + " " + s.UserAgent
}
func (s *Service) clientHeader() string {
return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}
func NewChangesService(s *Service) *ChangesService {
rs := &ChangesService{s: s}
return rs
@ -588,6 +593,7 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.change)
if err != nil {
@ -743,6 +749,7 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -934,6 +941,7 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1122,6 +1130,7 @@ func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone)
if err != nil {
@ -1256,6 +1265,7 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}")
@ -1373,6 +1383,7 @@ func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1545,6 +1556,7 @@ func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1722,6 +1734,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1896,6 +1909,7 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}

22
vendor/google.golang.org/api/gensupport/header.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"fmt"
"runtime"
"strings"
)
// GoogleClientHeader returns the value to use for the x-goog-api-client
// header, which is used internally by Google.
func GoogleClientHeader(generatorVersion, clientElement string) string {
elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)}
if clientElement != "" {
elts = append(elts, clientElement)
}
elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion))
return strings.Join(elts, " ")
}
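
For illustration, the header value this helper assembles has the following shape; the gl-go fragment varies with the running toolchain, and "my-element/1.0" is a made-up client element:

```go
// Illustrative only: shows the shape of the x-goog-api-client value.
package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

func main() {
	// Prints something like "gl-go/go1.8 my-element/1.0 gdcl/20170210";
	// the first fragment tracks runtime.Version().
	fmt.Println(gensupport.GoogleClientHeader("20170210", "my-element/1.0"))
	// An empty client element is simply omitted:
	fmt.Println(gensupport.GoogleClientHeader("20170210", ""))
}
```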

57
vendor/google.golang.org/api/gensupport/jsonfloat.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gensupport
import (
"encoding/json"
"errors"
"fmt"
"math"
)
// JSONFloat64 is a float64 that supports proper unmarshaling of special float
// values in JSON, according to
// https://developers.google.com/protocol-buffers/docs/proto3#json. Although
// that is a proto-to-JSON spec, it applies to all Google APIs.
//
// The jsonpb package
// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has
// similar functionality, but only for direct translation from proto messages
// to JSON.
type JSONFloat64 float64
func (f *JSONFloat64) UnmarshalJSON(data []byte) error {
var ff float64
if err := json.Unmarshal(data, &ff); err == nil {
*f = JSONFloat64(ff)
return nil
}
var s string
if err := json.Unmarshal(data, &s); err == nil {
switch s {
case "NaN":
ff = math.NaN()
case "Infinity":
ff = math.Inf(1)
case "-Infinity":
ff = math.Inf(-1)
default:
return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s)
}
*f = JSONFloat64(ff)
return nil
}
return errors.New("google.golang.org/api/internal: data not float or string")
}
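
A small sketch of what this type buys over a plain float64: the quoted proto3 sentinels decode, and anything else errors out.

```go
// Sketch: JSONFloat64 accepts numeric literals and the quoted proto3
// sentinels that a plain float64 would reject.
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/gensupport"
)

func main() {
	var vals []gensupport.JSONFloat64
	data := []byte(`[1.5, "NaN", "Infinity", "-Infinity"]`)
	if err := json.Unmarshal(data, &vals); err != nil {
		panic(err)
	}
	fmt.Println(vals) // [1.5 NaN +Inf -Inf]
}
```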

View File

@ -1,3 +1,17 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gensupport
import (

File diff suppressed because it is too large

File diff suppressed because it is too large