mirror of https://github.com/k3s-io/k3s
pull/6/head
parent 2bfa9a1f98
commit 9f77e49109
@@ -11429,7 +11429,7 @@
 },
 "lastTimestamp": {
 "type": "string",
-"description": "the time at which the most recent occurance of this event was recorded"
+"description": "the time at which the most recent occurrence of this event was recorded"
 },
 "count": {
 "type": "integer",

@@ -11985,7 +11985,7 @@
 },
 "hostPath": {
 "$ref": "v1.HostPathVolumeSource",
-"description": "a HostPath provisioned by a developer or tester; for develment use only; see http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath"
+"description": "a HostPath provisioned by a developer or tester; for development use only; see http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath"
 },
 "glusterfs": {
 "$ref": "v1.GlusterfsVolumeSource",

@@ -164,7 +164,7 @@ If you see that, DNS is working correctly.
 
 ## How does it work?
 SkyDNS depends on etcd for what to serve, but it doesn't really need all of
-what etcd offers (at least not in the way we use it). For simplicty, we run
+what etcd offers (at least not in the way we use it). For simplicity, we run
 etcd and SkyDNS together in a pod, and we do not try to link etcd instances
 across replicas. A helper container called [kube2sky](kube2sky/) also runs in
 the pod and acts a bridge between Kubernetes and SkyDNS. It finds the

@@ -49,7 +49,7 @@ SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 # If set to Elastic IP, master instance will be associated with this IP.
-# If set to auto, a new Elastic IP will be aquired
+# If set to auto, a new Elastic IP will be acquired
 # Otherwise amazon-given public ip will be used (it'll change with reboot).
 MASTER_RESERVED_IP="${MASTER_RESERVED_IP:-}"
 

@@ -45,7 +45,7 @@ SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 # If set to Elastic IP, master instance will be associated with this IP.
-# If set to auto, a new Elastic IP will be aquired
+# If set to auto, a new Elastic IP will be acquired
 # Otherwise amazon-given public ip will be used (it'll change with reboot).
 MASTER_RESERVED_IP="${MASTER_RESERVED_IP:-}"
 

@@ -876,7 +876,7 @@ function kube-up {
 
 # This is a race between instance start and volume attachment. There appears to be no way to start an AWS instance with a volume attached.
 # To work around this, we wait for volume to be ready in setup-master-pd.sh
-echo "Attaching peristent data volume (${MASTER_DISK_ID}) to master"
+echo "Attaching persistent data volume (${MASTER_DISK_ID}) to master"
 $AWS_CMD attach-volume --volume-id ${MASTER_DISK_ID} --device /dev/sdb --instance-id ${master_id}
 
 sleep 10

@@ -172,7 +172,7 @@ function get-kubeconfig-bearertoken() {
 }
 
 # Sets KUBE_VERSION variable to the version passed in as an argument, or if argument is
-# latest_stable, latest_release, or latest_ci fetches and sets the correponding version number
+# latest_stable, latest_release, or latest_ci fetches and sets the corresponding version number
 #
 # Args:
 # $1 version string from command line

@@ -425,7 +425,7 @@ function create-node-template {
 }
 
 # Robustly try to add metadata on an instance.
-# $1: The name of the instace.
+# $1: The name of the instance.
 # $2...$n: The metadata key=value pairs to add.
 function add-instance-metadata {
 local -r instance=$1

@@ -39,7 +39,7 @@ class TestInstallHook():
 pmock.return_value.write_lines.assert_called_with(['line1', 'line2',
 'test1', 'test2'])
 
-def test_update_rc_files_with_nonexistant_path(self):
+def test_update_rc_files_with_nonexistent_path(self):
 """
 Test an unhappy path if the bashrc/users do not exist.
 """

@@ -3,7 +3,7 @@
 set -ex
 
 # Start is guaranteed to be called once when after the unit is installed
-# *AND* once everytime a machine is rebooted.
+# *AND* once every time a machine is rebooted.
 
 if [ ! -f $CHARM_DIR/.unit-state ]
 then

@@ -16,7 +16,7 @@
 
 import json
 import sys
-# This script helps parse out the private IP addreses from the
+# This script helps parse out the private IP addresses from the
 # `juju run` command's JSON object, see cluster/juju/util.sh
 
 if len(sys.argv) > 1:

@@ -135,7 +135,7 @@ function sleep-status() {
 
 # sleep because we cannot get the status back of where the minions are in the deploy phase
 # thanks to a generic "started" state and our service not actually coming online until the
-# minions have recieved the binary from the master distribution hub during relations
+# minions have received the binary from the master distribution hub during relations
 echo "Sleeping an additional minute to allow the cluster to settle"
 sleep 60
 }

@@ -296,7 +296,7 @@ function kube-push-internal {
 local)
 kube-push-local;;
 *)
-echo "The only known push methods are \"release\" to use the relase tarball or \"local\" to use the binaries built by make. KUBE_PUSH is set \"$KUBE_PUSH\"" >&2
+echo "The only known push methods are \"release\" to use the release tarball or \"local\" to use the binaries built by make. KUBE_PUSH is set \"$KUBE_PUSH\"" >&2
 return 1;;
 esac
 }

@@ -218,7 +218,7 @@ function validate-cluster {
 echo "Validating ${KUBERNETES_PROVIDER} cluster" 1>&2
 
 # Do not validate cluster size. There will be zero k8s minions until a pod is created.
-# TODO(karlkfi): use componentstatuses or equivelent when it supports non-localhost core components
+# TODO(karlkfi): use componentstatuses or equivalent when it supports non-localhost core components
 
 # Validate immediate cluster reachability and responsiveness
 echo "KubeDNS: $(cluster::mesos::docker::addon_status 'kube-dns')"

@@ -25,7 +25,7 @@
 # LIMITATIONS
 # 1. controllers are not updated unless their name is changed
 # 3. Services will not be updated unless their name is changed,
-# but for services we acually want updates without name change.
+# but for services we actually want updates without name change.
 # 4. Json files are not handled at all. Currently addons must be
 # in yaml files
 # 5. exit code is probably not always correct (I haven't checked

@@ -6,7 +6,7 @@
 - mode: 755
 
 # Used to restart kube-master-addons service each time salt is run
-# Actually, it doens't work (the service is not restarted),
+# Actually, it does not work (the service is not restarted),
 # but master-addon service always terminates after it does it job,
 # so it is (usually) not running and it will be started when
 # salt is run.

@@ -35,7 +35,7 @@ function test-build-release {
 "${KUBE_ROOT}/build/release.sh"
 }
 
-# From user input set the necessary k8s and etcd configuration infomation
+# From user input set the necessary k8s and etcd configuration information
 function setClusterInfo() {
 # Initialize MINION_IPS in setClusterInfo function
 # MINION_IPS is defined as a global variable, and is concatenated with other nodeIP

@@ -479,7 +479,7 @@ func (s *APIServer) Run(_ []string) error {
 }
 // err == systemd.SdNotifyNoSocket when not running on a systemd system
 if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
-glog.Errorf("Unable to send systemd daemon sucessful start message: %v\n", err)
+glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
 }
 if err := secureServer.ListenAndServeTLS(s.TLSCertFile, s.TLSPrivateKeyFile); err != nil {
 glog.Errorf("Unable to listen for secure (%v); will try again.", err)

@@ -497,7 +497,7 @@ func (s *APIServer) Run(_ []string) error {
 if secureLocation == "" {
 // err == systemd.SdNotifyNoSocket when not running on a systemd system
 if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
-glog.Errorf("Unable to send systemd daemon sucessful start message: %v\n", err)
+glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
 }
 }
 glog.Infof("Serving insecurely on %s", insecureLocation)

@@ -125,7 +125,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 fs.IntVar(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
 fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
 "The number of retries for initial node registration. Retry interval equals node-sync-period.")
-fs.MarkDeprecated("register-retry-count", "This flag is currenty no-op and will be deleted.")
+fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
 fs.DurationVar(&s.NodeMonitorGracePeriod, "node-monitor-grace-period", 40*time.Second,
 "Amount of time which we allow running Node to be unresponsive before marking it unhealty. "+
 "Must be N times more than kubelet's nodeStatusUpdateFrequency, "+

@@ -25,7 +25,7 @@
 # LIMITATIONS
 # 1. controllers are not updated unless their name is changed
 # 3. Services will not be updated unless their name is changed,
-# but for services we acually want updates without name change.
+# but for services we actually want updates without name change.
 # 4. Json files are not handled at all. Currently addons must be
 # in yaml files
 # 5. exit code is probably not always correct (I haven't checked

@@ -25,7 +25,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 
 # This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
+# cert files to. Not really changeable...
 kube_cert_group: kube-cert
 
 # Internal DNS domain name.

@@ -182,7 +182,7 @@ func parseTimeISO8601(s string) (time.Time, error) {
 theTime.offMinute = v
 s = s[2:]
 default:
-return time.Time{}, errors.New("an unknown error occured")
+return time.Time{}, errors.New("an unknown error occurred")
 }
 state++
 }

@@ -1,4 +1,4 @@
-# Alpine linux would be great for this, but it's DNS does not use seach paths.
+# Alpine linux would be great for this, but it's DNS does not use search paths.
 FROM progrium/busybox
 MAINTAINER Tim Hockin "thockin@google.com"
 

@@ -34,7 +34,7 @@ In this case, if there are problems launching a replacement scheduler process th
 ##### Command Line Arguments
 
 - `--ha` is required to enable scheduler HA and multi-scheduler leader election.
-- `--km_path` or else (`--executor_path` and `--proxy_path`) should reference non-local-file URI's and must be identicial across schedulers.
+- `--km_path` or else (`--executor_path` and `--proxy_path`) should reference non-local-file URI's and must be identical across schedulers.
 
 If you have HDFS installed on your slaves then you can specify HDFS URI locations for the binaries:
 

@@ -263,7 +263,7 @@ func TestExecutorRegister(t *testing.T) {
 }
 
 // TestExecutorDisconnect ensures that the executor thinks that it is not
-// connected after a call to Disconnected has occured.
+// connected after a call to Disconnected has occurred.
 func TestExecutorDisconnect(t *testing.T) {
 mockDriver := &MockExecutorDriver{}
 executor := NewTestKubernetesExecutor()

@@ -346,7 +346,7 @@ func TestExecutorLaunchAndKillTask(t *testing.T) {
 select {
 case <-updates:
 case <-time.After(time.Second):
-t.Fatalf("Executor should send an intial update on Registration")
+t.Fatalf("Executor should send an initial update on Registration")
 }
 
 pod := NewTestPod(1)

@@ -58,7 +58,7 @@ type FIFO interface {
 // Pop waits until an item is ready and returns it. If multiple items are
 // ready, they are returned in the order in which they were added/updated.
 // The item is removed from the queue (and the store) before it is returned,
-// so if you don't succesfully process it, you need to add it back with Add().
+// so if you don't successfully process it, you need to add it back with Add().
 Pop() interface{}
 
 // Await attempts to Pop within the given interval; upon success the non-nil

@@ -350,7 +350,7 @@ func (k *kubeScheduler) doSchedule(task *podtask.T, err error) (string, error) {
 return "", fmt.Errorf("task.offer assignment must be idempotent, task %+v: offer %+v", task, offer)
 }
 
-// write resource limits into the pod spec which is transfered to the executor. From here
+// write resource limits into the pod spec which is transferred to the executor. From here
 // on we can expect that the pod spec of a task has proper limits for CPU and memory.
 // TODO(sttts): For a later separation of the kubelet and the executor also patch the pod on the apiserver
 if unlimitedCPU := mresource.LimitPodCPU(&task.Pod, k.defaultContainerCPULimit); unlimitedCPU {

@@ -52,7 +52,7 @@ This is a v1 api based, containerized prometheus ReplicationController, which sc
 
 1. Use kubectl to handle auth & proxy the kubernetes API locally, emulating the old KUBERNETES_RO service.
 
-1. The list of services to be monitored is passed as a command line aguments in
+1. The list of services to be monitored is passed as a command line arguments in
 the yaml file.
 
 1. The startup scripts assumes that each service T will have

@@ -191,7 +191,7 @@ $ mysql -u root -ppassword --host 104.197.63.17 --port 3306 -e 'show databases;'
 ### Troubleshooting:
 - If you can curl or netcat the endpoint from the pod (with kubectl exec) and not from the node, you have not specified hostport and containerport.
 - If you can hit the ips from the node but not from your machine outside the cluster, you have not opened firewall rules for the right network.
-- If you can't hit the ips from within the container, either haproxy or the service_loadbalacer script is not runing.
+- If you can't hit the ips from within the container, either haproxy or the service_loadbalacer script is not running.
 1. Use ps in the pod
 2. sudo restart haproxy in the pod
 3. cat /etc/haproxy/haproxy.cfg in the pod

@@ -141,7 +141,7 @@ func TestGetServices(t *testing.T) {
 {Port: 20, TargetPort: util.NewIntOrStringFromInt(ports[1])},
 }
 
-// 2 services targetting the same endpoints, one of which is declared as a tcp service.
+// 2 services targeting the same endpoints, one of which is declared as a tcp service.
 svc1 := getService(servicePorts)
 svc2 := getService(servicePorts)
 endpoints := []*api.Endpoints{

@@ -88,7 +88,7 @@ To avoid running into cluster addon resource issues, when creating a cluster wit
 * [Kibana](http://releases.k8s.io/HEAD/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml)
 * Scale number of replicas for the following addons, if used, along with the size of cluster (there are multiple replicas of each so increasing replicas should help handle increased load, but, since load per replica also increases slightly, also consider increasing CPU/memory limits):
 * [elasticsearch](http://releases.k8s.io/HEAD/cluster/addons/fluentd-elasticsearch/es-controller.yaml)
-* Increase memory and CPU limits sligthly for each of the following addons, if used, along with the size of cluster (there is one replica per node but CPU/memory usage increases slightly along with cluster load/size as well):
+* Increase memory and CPU limits slightly for each of the following addons, if used, along with the size of cluster (there is one replica per node but CPU/memory usage increases slightly along with cluster load/size as well):
 * [FluentD with ElasticSearch Plugin](http://releases.k8s.io/HEAD/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml)
 * [FluentD with GCP Plugin](http://releases.k8s.io/HEAD/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
 

@@ -35,7 +35,7 @@ Documentation for other releases can be found at
 
 This document describes several topics related to the lifecycle of a cluster: creating a new cluster,
 upgrading your cluster's
-master and worker nodes, performing node maintainence (e.g. kernel upgrades), and upgrading the Kubernetes API version of a
+master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a
 running cluster.
 
 ## Creating and configuring a Cluster

@@ -132,7 +132,7 @@ For pods with a replication controller, the pod will eventually be replaced by a
 
 For pods with no replication controller, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it.
 
-Perform maintainence work on the node.
+Perform maintenance work on the node.
 
 Make the node schedulable again:
 

@@ -41,7 +41,7 @@ objects.
 
 Access Control: give *only* kube-apiserver read/write access to etcd. You do not
 want apiserver's etcd exposed to every node in your cluster (or worse, to the
-internet at large), because access to etcd is equivilent to root in your
+internet at large), because access to etcd is equivalent to root in your
 cluster.
 
 Data Reliability: for reasonable safety, either etcd needs to be run as a

@@ -4224,7 +4224,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">hostPath</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">a HostPath provisioned by a developer or tester; for develment use only; see <a href="http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath">http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath</a></p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">a HostPath provisioned by a developer or tester; for development use only; see <a href="http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath">http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath</a></p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_hostpathvolumesource">v1.HostPathVolumeSource</a></p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -5099,7 +5099,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">lastTimestamp</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">the time at which the most recent occurance of this event was recorded</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">the time at which the most recent occurrence of this event was recorded</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -87,7 +87,7 @@ Note: If you have write access to the main repository at github.com/GoogleCloudP
 git remote set-url --push upstream no_push
 ```
 
-### Commiting changes to your fork
+### Committing changes to your fork
 
 ```sh
 git commit

@@ -40,7 +40,7 @@ This documents the process for making release notes for a release.
 Find the most-recent PR that was merged with the previous .0 release. Remember this as $LASTPR.
 _TODO_: Figure out a way to record this somewhere to save the next release engineer time.
 
-Find the most-recent PR that was merged with the current .0 release. Remeber this as $CURRENTPR.
+Find the most-recent PR that was merged with the current .0 release. Remember this as $CURRENTPR.
 
 ### 2) Run the release-notes tool
 

@@ -1,4 +1,4 @@
-## This file is used as input to deployment script, which ammends it as needed.
+## This file is used as input to deployment script, which amends it as needed.
 ## More specifically, we need to add peer hosts for each but the elected peer.
 
 coreos:

@@ -1,4 +1,4 @@
-## This file is used as input to deployment script, which ammends it as needed.
+## This file is used as input to deployment script, which amends it as needed.
 ## More specifically, we need to add environment files for as many nodes as we
 ## are going to deploy.
 

@@ -87,7 +87,7 @@ cd kubernetes/cluster/docker-multinode
 
 `Master done!`
 
-See [here](docker-multinode/master.md) for detailed instructions explaination.
+See [here](docker-multinode/master.md) for detailed instructions explanation.
 
 ## Adding a worker node
 

@@ -104,7 +104,7 @@ cd kubernetes/cluster/docker-multinode
 
 `Worker done!`
 
-See [here](docker-multinode/worker.md) for detailed instructions explaination.
+See [here](docker-multinode/worker.md) for detailed instructions explanation.
 
 ## Testing your cluster
 

@@ -3,7 +3,7 @@
 
 .SH NAME
 .PP
-kubectl namespace \- SUPERCEDED: Set and view the current Kubernetes namespace
+kubectl namespace \- SUPERSEDED: Set and view the current Kubernetes namespace
 
 
 .SH SYNOPSIS

@@ -13,10 +13,10 @@ kubectl namespace \- SUPERCEDED: Set and view the current Kubernetes namespace
 
 .SH DESCRIPTION
 .PP
-SUPERCEDED: Set and view the current Kubernetes namespace scope for command line requests.
+SUPERSEDED: Set and view the current Kubernetes namespace scope for command line requests.
 
 .PP
-namespace has been superceded by the context.namespace field of .kubeconfig files. See 'kubectl config set\-context \-\-help' for more details.
+namespace has been superseded by the context.namespace field of .kubeconfig files. See 'kubectl config set\-context \-\-help' for more details.
 
 
 .SH OPTIONS

@@ -91,7 +91,7 @@ kubectl
 * [kubectl get](kubectl_get.md) - Display one or many resources
 * [kubectl label](kubectl_label.md) - Update the labels on a resource
 * [kubectl logs](kubectl_logs.md) - Print the logs for a container in a pod.
-* [kubectl namespace](kubectl_namespace.md) - SUPERCEDED: Set and view the current Kubernetes namespace
+* [kubectl namespace](kubectl_namespace.md) - SUPERSEDED: Set and view the current Kubernetes namespace
 * [kubectl patch](kubectl_patch.md) - Update field(s) of a resource by stdin.
 * [kubectl port-forward](kubectl_port-forward.md) - Forward one or more local ports to a pod.
 * [kubectl proxy](kubectl_proxy.md) - Run a proxy to the Kubernetes API server

@@ -33,14 +33,14 @@ Documentation for other releases can be found at
 
 ## kubectl namespace
 
-SUPERCEDED: Set and view the current Kubernetes namespace
+SUPERSEDED: Set and view the current Kubernetes namespace
 
 ### Synopsis
 
 
-SUPERCEDED: Set and view the current Kubernetes namespace scope for command line requests.
+SUPERSEDED: Set and view the current Kubernetes namespace scope for command line requests.
 
-namespace has been superceded by the context.namespace field of .kubeconfig files. See 'kubectl config set-context --help' for more details.
+namespace has been superseded by the context.namespace field of .kubeconfig files. See 'kubectl config set-context --help' for more details.
 
 
 ```

@@ -160,7 +160,7 @@ See another example of creating a secret and a pod that consumes that secret in
 
 ### Manually specifying an imagePullSecret
 
-Use of imagePullSecrets is desribed in the [images documentation](images.md#specifying-imagepullsecrets-on-a-pod)
+Use of imagePullSecrets is described in the [images documentation](images.md#specifying-imagepullsecrets-on-a-pod)
 
 ### Automatic use of Manually Created Secrets
 

@@ -506,7 +506,7 @@ tombstone_failure_threshold: 100000
 # Increase if your rows are large, or if you have a very large
 # number of rows per partition. The competing goals are these:
 # 1) a smaller granularity means more index entries are generated
-# and looking up rows withing the partition by collation column
+# and looking up rows within the partition by collation column
 # is faster
 # 2) but, Cassandra will keep the collation index in memory for hot
 # rows (as part of the key cache), so a larger granularity means

@@ -90,7 +90,7 @@ function write_api_server_config {
 mkdir -p -m 777 /srv/kubernetes/
 
 ### We will move files back and forth between the /srv/kube.. directory.
-### That is how we modulate leader. Each node will continously either
+### That is how we modulate leader. Each node will continuously either
 ### ensure that the manifests are in this dir, or else, are in the kubelet manifest dir.
 cp /vagrant/kube-scheduler.manifest /vagrant/kube-controller-manager.manifest /srv/kubernetes
 

@@ -20,7 +20,7 @@ go get
 go run PetStoreBook.go
 ```
 
-Once the app works the way you want it to, test it in the vagrant recipe below. This will gaurantee that you're local environment isn't doing something that breaks the containers at the versioning level.
+Once the app works the way you want it to, test it in the vagrant recipe below. This will guarantee that you're local environment isn't doing something that breaks the containers at the versioning level.
 
 ### Testing
 

@@ -318,7 +318,7 @@ func printPrefixedLines(prefix, s string) {
 }
 
 // returns either "", or a list of args intended for appending with the
-// kubectl command (begining with a space).
+// kubectl command (beginning with a space).
 func kubectlArgs() string {
 if *checkVersionSkew {
 return " --match-server-version"

@@ -4217,7 +4217,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">hostPath</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">a HostPath provisioned by a developer or tester; for develment use only; see <a href="http://releases.k8s.io/HEAD/docs/volumes.md#hostpath">http://releases.k8s.io/HEAD/docs/volumes.md#hostpath</a></p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">a HostPath provisioned by a developer or tester; for development use only; see <a href="http://releases.k8s.io/HEAD/docs/volumes.md#hostpath">http://releases.k8s.io/HEAD/docs/volumes.md#hostpath</a></p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_hostpathvolumesource">v1.HostPathVolumeSource</a></p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -5092,7 +5092,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">lastTimestamp</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">the time at which the most recent occurance of this event was recorded</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">the time at which the most recent occurrence of this event was recorded</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -47,7 +47,7 @@ kube::util::gen-analytics "${KUBE_ROOT}"
 
 "${mungedocs}" "--root-dir=${KUBE_ROOT}/docs/" && ret=0 || ret=$?
 if [[ $ret -eq 1 ]]; then
-echo "${KUBE_ROOT}/docs/ requires manual changes. See preceeding errors."
+echo "${KUBE_ROOT}/docs/ requires manual changes. See preceding errors."
 exit 1
 elif [[ $ret -gt 1 ]]; then
 echo "Error running mungedocs."

@@ -56,7 +56,7 @@ fi
 
 "${mungedocs}" "--root-dir=${KUBE_ROOT}/examples/" && ret=0 || ret=$?
 if [[ $ret -eq 1 ]]; then
-echo "${KUBE_ROOT}/examples/ requires manual changes. See preceeding errors."
+echo "${KUBE_ROOT}/examples/ requires manual changes. See preceding errors."
 exit 1
 elif [[ $ret -gt 1 ]]; then
 echo "Error running mungedocs."

@@ -57,7 +57,7 @@ def file_passes(filename, extension, ref, regexs):
 if p.search(d):
 return False
 
-# Replace all occurances of the regex "2015|2014" with "YEAR"
+# Replace all occurrences of the regex "2015|2014" with "YEAR"
 p = regexs["date"]
 for i, d in enumerate(data):
 (data[i], found) = p.subn('YEAR', d)

@@ -34,7 +34,7 @@ const (
 StatusTooManyRequests = 429
 // HTTP recommendations are for servers to define 5xx error codes
 // for scenarios not covered by behavior. In this case, ServerTimeout
-// is an indication that a transient server error has occured and the
+// is an indication that a transient server error has occurred and the
 // client *should* retry, with an optional Retry-After header to specify
 // the back off window.
 StatusServerTimeout = 504

@@ -53,7 +53,7 @@ var Semantic = conversion.EqualitiesOrDie(
 // TODO: if we decide it's important, after we drop v1beta1/2, we
 // could start comparing format.
 //
-// Uninitialized quantities are equivilent to 0 quantities.
+// Uninitialized quantities are equivalent to 0 quantities.
 if a.Amount == nil && b.MilliValue() == 0 {
 return true
 }

@@ -1808,7 +1808,7 @@ type StatusCause struct {
 }
 
 // CauseType is a machine readable value providing more detail about what
-// occured in a status response. An operation may have multiple causes for a
+// occurred in a status response. An operation may have multiple causes for a
 // status (whether Failure or Success).
 type CauseType string
 

@@ -1897,7 +1897,7 @@ type Event struct {
 // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
 FirstTimestamp util.Time `json:"firstTimestamp,omitempty"`
 
-// The time at which the most recent occurance of this event was recorded.
+// The time at which the most recent occurrence of this event was recorded.
 LastTimestamp util.Time `json:"lastTimestamp,omitempty"`
 
 // The number of times this event has occurred.

@@ -46,7 +46,7 @@ func addConversionFuncs() {
 "status.phase",
 "spec.nodeName":
 return label, value, nil
-// This is for backwards compatability with old v1 clients which send spec.host
+// This is for backwards compatibility with old v1 clients which send spec.host
 case "spec.host":
 return "spec.nodeName", value, nil
 default:

@@ -224,7 +224,7 @@ type PersistentVolumeSource struct {
 // HostPath represents a directory on the host.
 // This is useful for development and testing only.
 // on-host storage is not supported in any way.
-HostPath *HostPathVolumeSource `json:"hostPath,omitempty" description:"a HostPath provisioned by a developer or tester; for develment use only; see http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath"`
+HostPath *HostPathVolumeSource `json:"hostPath,omitempty" description:"a HostPath provisioned by a developer or tester; for development use only; see http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath"`
 // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
 Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" description:"Glusterfs volume resource provisioned by an admin; see http://releases.k8s.io/HEAD/examples/glusterfs/README.md"`
 // NFS represents an NFS mount on the host

@@ -1715,7 +1715,7 @@ type StatusCause struct {
 }
 
 // CauseType is a machine readable value providing more detail about what
-// occured in a status response. An operation may have multiple causes for a
+// occurred in a status response. An operation may have multiple causes for a
 // status (whether Failure or Success).
 type CauseType string
 

@@ -1799,8 +1799,8 @@ type Event struct {
 // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
 FirstTimestamp util.Time `json:"firstTimestamp,omitempty" description:"the time at which the event was first recorded"`
 
-// The time at which the most recent occurance of this event was recorded.
-LastTimestamp util.Time `json:"lastTimestamp,omitempty" description:"the time at which the most recent occurance of this event was recorded"`
+// The time at which the most recent occurrence of this event was recorded.
+LastTimestamp util.Time `json:"lastTimestamp,omitempty" description:"the time at which the most recent occurrence of this event was recorded"`
 
 // The number of times this event has occurred.
 Count int `json:"count,omitempty" description:"the number of times this event has occurred"`

@@ -221,7 +221,7 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val
 }
 // If the generated name validates, but the calculated value does not, it's a problem with generation, and we
 // report it here. This may confuse users, but indicates a programming bug and still must be validated.
-// If there are multiple fields out of which one is required then add a or as a seperator
+// If there are multiple fields out of which one is required then add a or as a separator
 if len(meta.Name) == 0 {
 allErrs = append(allErrs, errs.NewFieldRequired("name or generateName"))
 } else {

@@ -110,7 +110,7 @@ func NotTestAuthorize(t *testing.T) {
 {User: uChuck, RO: true, Resource: "pods", NS: "ns1", ExpectAllow: false},
 {User: uChuck, RO: true, Resource: "floop", NS: "ns1", ExpectAllow: false},
 // Chunk can't access things with no kind or namespace
-// TODO: find a way to give someone access to miscelaneous endpoints, such as
+// TODO: find a way to give someone access to miscellaneous endpoints, such as
 // /healthz, /version, etc.
 {User: uChuck, RO: true, Resource: "", NS: "", ExpectAllow: false},
 }

@@ -24,7 +24,7 @@ import (
 // that is used to make an authorization decision.
 type Attributes interface {
 // The user string which the request was authenticated as, or empty if
-// no authentication occured and the request was allowed to proceed.
+// no authentication occurred and the request was allowed to proceed.
 GetUserName() string
 
 // The list of group names the authenticated user is a member of. Can be

@@ -277,7 +277,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
 // Pop blocks until an item is added to the queue, and then returns it. If
 // multiple items are ready, they are returned in the order in which they were
 // added/updated. The item is removed from the queue (and the store) before it
-// is returned, so if you don't succesfully process it, you need to add it back
+// is returned, so if you don't successfully process it, you need to add it back
 // with AddIfNotPresent().
 //
 // Pop returns a 'Deltas', which has a complete list of all the things

@@ -118,7 +118,7 @@ func TestTTLPolicy(t *testing.T) {
 }
 fakeTimestampedEntry.timestamp = fakeTime
 if policy.IsExpired(fakeTimestampedEntry) {
-t.Errorf("TTL Cache should not expire entires before ttl")
+t.Errorf("TTL Cache should not expire entries before ttl")
 }
 fakeTimestampedEntry.timestamp = expiredTime
 if !policy.IsExpired(fakeTimestampedEntry) {

@@ -163,7 +163,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
 // Pop waits until an item is ready and returns it. If multiple items are
 // ready, they are returned in the order in which they were added/updated.
 // The item is removed from the queue (and the store) before it is returned,
-// so if you don't succesfully process it, you need to add it back with
+// so if you don't successfully process it, you need to add it back with
 // AddIfNotPresent().
 func (f *FIFO) Pop() interface{} {
 f.lock.Lock()

@@ -30,7 +30,7 @@ type Enumerator interface {
 Get(index int) (object interface{})
 }
 
-// GetFunc should return an enumerator that you wish the Poller to proccess.
+// GetFunc should return an enumerator that you wish the Poller to process.
 type GetFunc func() (Enumerator, error)
 
 // Poller is like Reflector, but it periodically polls instead of watching.

@ -103,7 +103,7 @@ type pIntercept struct {
|
||||||
p float64
|
p float64
|
||||||
}
|
}
|
||||||
|
|
||||||
// P returns a ChaosFunc that fires with a probabilty of p (p between 0.0
|
// P returns a ChaosFunc that fires with a probability of p (p between 0.0
|
||||||
// and 1.0 with 0.0 meaning never and 1.0 meaning always).
|
// and 1.0 with 0.0 meaning never and 1.0 meaning always).
|
||||||
func (s Seed) P(p float64, c Chaos) Chaos {
|
func (s Seed) P(p float64, c Chaos) Chaos {
|
||||||
return pIntercept{c, s, p}
|
return pIntercept{c, s, p}
|
||||||
|
|
|
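The Seed.P helper documented above wraps a chaos injector so that it only fires with probability p. A standalone sketch of the same idea (not the actual chaos package; Chaos is simplified here to a plain error-returning function, and the names are made up for illustration):

package main

import (
	"fmt"
	"math/rand"
)

// chaos is a simplified stand-in for a fault-injection hook: it returns a
// non-nil error when it decides to fire.
type chaos func() error

// p wraps c so that it fires with probability prob (0.0 = never, 1.0 = always).
func p(r *rand.Rand, prob float64, c chaos) chaos {
	return func() error {
		if r.Float64() < prob {
			return c()
		}
		return nil
	}
}

func main() {
	r := rand.New(rand.NewSource(1))
	flaky := p(r, 0.3, func() error { return fmt.Errorf("injected failure") })
	failures := 0
	for i := 0; i < 1000; i++ {
		if flaky() != nil {
			failures++
		}
	}
	fmt.Printf("fired %d/1000 times (expected roughly 300)\n", failures)
}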
@@ -32,7 +32,7 @@ type FlagSet interface {
 }
 
 // BindClientConfigFlags registers a standard set of CLI flags for connecting to a Kubernetes API server.
-// TODO this method is superceded by pkg/client/clientcmd/client_builder.go
+// TODO this method is superseded by pkg/client/clientcmd/client_builder.go
 func BindClientConfigFlags(flags FlagSet, config *Config) {
 flags.StringVar(&config.Host, "master", config.Host, "The address of the Kubernetes API server")
 flags.StringVar(&config.Version, "api_version", config.Version, "The API version to use when talking to the server")

@@ -68,16 +68,16 @@ type EventRecorder interface {
 
 // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
 type EventBroadcaster interface {
-// StartEventWatcher starts sending events recieved from this EventBroadcaster to the given
+// StartEventWatcher starts sending events received from this EventBroadcaster to the given
 // event handler function. The return value can be ignored or used to stop recording, if
 // desired.
 StartEventWatcher(eventHandler func(*api.Event)) watch.Interface
 
-// StartRecordingToSink starts sending events recieved from this EventBroadcaster to the given
+// StartRecordingToSink starts sending events received from this EventBroadcaster to the given
 // sink. The return value can be ignored or used to stop recording, if desired.
 StartRecordingToSink(sink EventSink) watch.Interface
 
-// StartLogging starts sending events recieved from this EventBroadcaster to the given logging
+// StartLogging starts sending events received from this EventBroadcaster to the given logging
 // function. The return value can be ignored or used to stop recording, if desired.
 StartLogging(logf func(format string, args ...interface{})) watch.Interface
 

@@ -95,7 +95,7 @@ type eventBroadcasterImpl struct {
 *watch.Broadcaster
 }
 
-// StartRecordingToSink starts sending events recieved from the specified eventBroadcaster to the given sink.
+// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
 // The return value can be ignored or used to stop recording, if desired.
 // TODO: make me an object with parameterizable queue length and retry interval
 func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface {

@@ -195,7 +195,7 @@ func recordEvent(sink EventSink, event *api.Event, updateExistingEvent bool) boo
 return false
 }
 
-// StartLogging starts sending events recieved from this EventBroadcaster to the given logging function.
+// StartLogging starts sending events received from this EventBroadcaster to the given logging function.
 // The return value can be ignored or used to stop recording, if desired.
 func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface {
 return eventBroadcaster.StartEventWatcher(

@@ -204,7 +204,7 @@ func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format stri
 })
 }
 
-// StartEventWatcher starts sending events recieved from this EventBroadcaster to the given event handler function.
+// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
 // The return value can be ignored or used to stop recording, if desired.
 func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*api.Event)) watch.Interface {
 watcher := eventBroadcaster.Watch()
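The three Start* methods whose comments are corrected above all funnel through the same mechanism: the broadcaster fans each received event out to every registered watcher, and each Start call just attaches a handler that consumes its own watch channel. A rough standalone sketch of that fan-out pattern (the real implementation lives in pkg/client/record and uses the watch package; the event and broadcaster types below are invented for illustration):

package main

import (
	"fmt"
	"sync"
)

// event is a placeholder for api.Event.
type event struct{ Reason string }

// broadcaster fans out every event to all registered watcher channels.
type broadcaster struct {
	mu       sync.Mutex
	watchers []chan event
}

// watch registers a new consumer and returns its channel.
func (b *broadcaster) watch() <-chan event {
	b.mu.Lock()
	defer b.mu.Unlock()
	ch := make(chan event, 16)
	b.watchers = append(b.watchers, ch)
	return ch
}

// action delivers an event to every watcher.
func (b *broadcaster) action(e event) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, ch := range b.watchers {
		ch <- e
	}
}

// startEventWatcher mirrors StartEventWatcher: run a handler over one watch channel.
func (b *broadcaster) startEventWatcher(handler func(event), done *sync.WaitGroup) {
	ch := b.watch()
	done.Add(1)
	go func() {
		defer done.Done()
		for e := range ch {
			handler(e)
		}
	}()
}

func main() {
	var wg sync.WaitGroup
	b := &broadcaster{}
	// One "sink" consumer and one "logging" consumer, as in StartRecordingToSink/StartLogging.
	b.startEventWatcher(func(e event) { fmt.Println("sink got:", e.Reason) }, &wg)
	b.startEventWatcher(func(e event) { fmt.Println("log got:", e.Reason) }, &wg)
	b.action(event{Reason: "Started"})
	// Close all watcher channels so the consumer goroutines exit.
	b.mu.Lock()
	for _, ch := range b.watchers {
		close(ch)
	}
	b.mu.Unlock()
	wg.Wait()
}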
@@ -318,7 +318,7 @@ func validateEvent(actualEvent *api.Event, expectedEvent *api.Event, t *testing.
 }
 if actualEvent.FirstTimestamp.Equal(actualEvent.LastTimestamp) {
 if expectCompression {
-t.Errorf("FirstTimestamp (%q) and LastTimestamp (%q) must be equal to indicate only one occurance of the event, but were different. Actual Event: %#v", actualEvent.FirstTimestamp, actualEvent.LastTimestamp, *actualEvent)
+t.Errorf("FirstTimestamp (%q) and LastTimestamp (%q) must be equal to indicate only one occurrence of the event, but were different. Actual Event: %#v", actualEvent.FirstTimestamp, actualEvent.LastTimestamp, *actualEvent)
 }
 } else {
 if !expectCompression {

@@ -25,13 +25,13 @@ import (
 )
 
 type history struct {
-// The number of times the event has occured since first occurance.
+// The number of times the event has occurred since first occurrence.
 Count int
 
 // The time at which the event was first recorded.
 FirstTimestamp util.Time
 
-// The unique name of the first occurance of this event
+// The unique name of the first occurrence of this event
 Name string
 
 // Resource version returned from previous interaction with server
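The history struct above is the bookkeeping side of event compression: repeated occurrences of the same event bump Count and keep the first-seen timestamp and name rather than producing a fresh object each time. A rough sketch of how such a table might be keyed and updated (purely illustrative; the real aggregation key and storage live in pkg/client/record, and util.Time is replaced by time.Time here):

package main

import (
	"fmt"
	"time"
)

// history mirrors the fields described above.
type history struct {
	Count          int
	FirstTimestamp time.Time
	Name           string
}

// observe records one occurrence of an event identified by key, creating the
// history entry on first occurrence and bumping Count afterwards.
func observe(table map[string]history, key, name string, now time.Time) history {
	h, ok := table[key]
	if !ok {
		h = history{FirstTimestamp: now, Name: name}
	}
	h.Count++
	table[key] = h
	return h
}

func main() {
	table := map[string]history{}
	now := time.Now()
	observe(table, "pod/web-1:FailedScheduling", "web-1.13a", now)
	h := observe(table, "pod/web-1:FailedScheduling", "web-1.13a", now.Add(time.Second))
	fmt.Printf("count=%d first=%s name=%s\n", h.Count, h.FirstTimestamp.Format(time.RFC3339), h.Name)
}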
@@ -641,7 +641,7 @@ func (r *Request) Upgrade(config *Config, newRoundTripperFunc func(*tls.Config)
 
 // request connects to the server and invokes the provided function when a server response is
 // received. It handles retry behavior and up front validation of requests. It wil invoke
-// fn at most once. It will return an error if a problem occured prior to connecting to the
+// fn at most once. It will return an error if a problem occurred prior to connecting to the
 // server - the provided function is responsible for handling server errors.
 func (r *Request) request(fn func(*http.Request, *http.Response)) error {
 if r.err != nil {

@@ -164,7 +164,7 @@ func (md FakeMasterDetector) Done() <-chan struct{} {
 return md.done
 }
 
-// Auxilliary functions
+// Auxiliary functions
 
 func makeHttpMocks() (*httptest.Server, *http.Client, *http.Transport) {
 httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -44,7 +44,7 @@ const (
 // FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It
 // allows test cases to have fine-grained control over mock behaviors. We also need
 // PodsInterface and PodInterface to test list & delet pods, which is implemented in
-// the embeded client.Fake field.
+// the embedded client.Fake field.
 type FakeNodeHandler struct {
 *testclient.Fake
 

@@ -43,7 +43,7 @@ func TestAddNode(t *testing.T) {
 
 queuePattern := []string{"first", "second", "third"}
 if len(evictor.queue.queue) != len(queuePattern) {
-t.Fatalf("Queue %v should have lenght %d", evictor.queue.queue, len(queuePattern))
+t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
 }
 if !CheckQueueEq(queuePattern, evictor.queue.queue) {
 t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)

@@ -89,7 +89,7 @@ func TestDelNode(t *testing.T) {
 
 queuePattern = []string{"first", "third"}
 if len(evictor.queue.queue) != len(queuePattern) {
-t.Fatalf("Queue %v should have lenght %d", evictor.queue.queue, len(queuePattern))
+t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
 }
 if !CheckQueueEq(queuePattern, evictor.queue.queue) {
 t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)

@@ -111,7 +111,7 @@ func TestDelNode(t *testing.T) {
 
 queuePattern = []string{"first", "second"}
 if len(evictor.queue.queue) != len(queuePattern) {
-t.Fatalf("Queue %v should have lenght %d", evictor.queue.queue, len(queuePattern))
+t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
 }
 if !CheckQueueEq(queuePattern, evictor.queue.queue) {
 t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)

@@ -139,7 +139,7 @@ func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *Repl
 }
 rm.enqueueController(cur)
 },
-// This will enter the sync loop and no-op, becuase the controller has been deleted from the store.
+// This will enter the sync loop and no-op, because the controller has been deleted from the store.
 // Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
 // way of achieving this is by performing a `stop` operation on the controller.
 DeleteFunc: rm.enqueueController,

@@ -46,10 +46,10 @@ type FakePodControl struct {
 err error
 }
 
-// Give each test that starts a background controller upto 1/2 a second.
+// Give each test that starts a background controller up to 1/2 a second.
 // Since we need to start up a goroutine to test watch, this routine needs
 // to get cpu before the test can complete. If the test is starved of cpu,
-// the watch test will take upto 1/2 a second before timing out.
+// the watch test will take up to 1/2 a second before timing out.
 const controllerTimeout = 500 * time.Millisecond
 
 var alwaysReady = func() bool { return true }

@@ -638,7 +638,7 @@ func TestUpdatePods(t *testing.T) {
 }
 
 func TestControllerUpdateRequeue(t *testing.T) {
-// This server should force a requeue of the controller becuase it fails to update status.Replicas.
+// This server should force a requeue of the controller because it fails to update status.Replicas.
 fakeHandler := util.FakeHandler{
 StatusCode: 500,
 ResponseBody: "",

@@ -806,7 +806,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 
 // Create/Delete the last pod
 // The last add pod will decrease the expectation of the rc to 0,
-// which will cause it to create/delete the remaining replicas upto burstReplicas.
+// which will cause it to create/delete the remaining replicas up to burstReplicas.
 if replicas != 0 {
 manager.podStore.Store.Add(&pods.Items[expectedPods-1])
 manager.addPod(&pods.Items[expectedPods-1])

@@ -242,7 +242,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
 return err, retry
 }
 // Always update the cache upon success.
-// NOTE: Since we update the cached service if and only if we successully
+// NOTE: Since we update the cached service if and only if we successfully
 // processed it, a cached service being nil implies that it hasn't yet
 // been successfully processed.
 cachedService.appliedState = service

@@ -111,7 +111,7 @@ type Scope interface {
 DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error
 
 // If registered, returns a function applying defaults for objects of a given type.
-// Used for automatically generating convertion functions.
+// Used for automatically generating conversion functions.
 DefaultingInterface(inType reflect.Type) (interface{}, bool)
 
 // SrcTags and DestTags contain the struct tags that src and dest had, respectively.

@@ -649,7 +649,7 @@ type kvValue interface {
 tagOf(key string) reflect.StructTag
 // Will return the zero Value if the key doesn't exist.
 value(key string) reflect.Value
-// Maps require explict setting-- will do nothing for structs.
+// Maps require explicit setting-- will do nothing for structs.
 // Returns false on failure.
 confirmSet(key string, v reflect.Value) bool
 }

@@ -83,7 +83,7 @@ func DefaultStacktracePred(status int) bool {
 //
 // defer NewLogged(req, &w).StacktraceWhen(StatusIsNot(200, 202)).Log()
 //
-// (Only the call to Log() is defered, so you can set everything up in one line!)
+// (Only the call to Log() is deferred, so you can set everything up in one line!)
 //
 // Note that this *changes* your writer, to route response writing actions
 // through the logger.
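The comment corrected above leans on a Go detail worth spelling out: in a statement like defer NewLogged(req, &w).StacktraceWhen(...).Log(), everything up to the final call is evaluated immediately when the defer statement runs; only the last call (Log()) is postponed until the function returns, which is why the whole logger can be configured on one line. A small self-contained demonstration of that evaluation order (unrelated to the httplog package itself; all names below are invented for the example):

package main

import "fmt"

type logger struct{ level string }

// newLogged runs immediately at the defer statement.
func newLogged() *logger {
	fmt.Println("constructed")
	return &logger{}
}

// stacktraceWhen also runs immediately; it just configures the receiver.
func (l *logger) stacktraceWhen(level string) *logger {
	fmt.Println("configured:", level)
	l.level = level
	return l
}

// log is the only call that is actually deferred.
func (l *logger) log() {
	fmt.Println("logged at level:", l.level)
}

func main() {
	defer newLogged().stacktraceWhen("error").log()
	fmt.Println("handler body runs after construction and configuration")
}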
@@ -399,7 +399,7 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 // ||
 }
 
-func TestNormalizationFuncGlobalExistance(t *testing.T) {
+func TestNormalizationFuncGlobalExistence(t *testing.T) {
 // This test can be safely deleted when we will not support multiple flag formats
 root := NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
 

@@ -95,13 +95,13 @@ func TestSetCurrentContext(t *testing.T) {
 test.run(t)
 }
 
-func TestSetNonExistantContext(t *testing.T) {
+func TestSetNonExistentContext(t *testing.T) {
 expectedConfig := newRedFederalCowHammerConfig()
 test := configCommandTest{
-args: []string{"use-context", "non-existant-config"},
+args: []string{"use-context", "non-existent-config"},
 startingConfig: expectedConfig,
 expectedConfig: expectedConfig,
-expectedOutputs: []string{`No context exists with the name: "non-existant-config"`},
+expectedOutputs: []string{`No context exists with the name: "non-existent-config"`},
 }
 test.run(t)
 }

@@ -28,13 +28,13 @@ import (
 func NewCmdNamespace(out io.Writer) *cobra.Command {
 cmd := &cobra.Command{
 Use: "namespace [namespace]",
-Short: "SUPERCEDED: Set and view the current Kubernetes namespace",
+Short: "SUPERSEDED: Set and view the current Kubernetes namespace",
-Long: `SUPERCEDED: Set and view the current Kubernetes namespace scope for command line requests.
+Long: `SUPERSEDED: Set and view the current Kubernetes namespace scope for command line requests.
 
-namespace has been superceded by the context.namespace field of .kubeconfig files. See 'kubectl config set-context --help' for more details.
+namespace has been superseded by the context.namespace field of .kubeconfig files. See 'kubectl config set-context --help' for more details.
 `,
 Run: func(cmd *cobra.Command, args []string) {
-util.CheckErr(fmt.Errorf("namespace has been superceded by the context.namespace field of .kubeconfig files. See 'kubectl config set-context --help' for more details."))
+util.CheckErr(fmt.Errorf("namespace has been superseded by the context.namespace field of .kubeconfig files. See 'kubectl config set-context --help' for more details."))
 },
 }
 return cmd

@@ -248,7 +248,7 @@ func (f *Factory) BindFlags(flags *pflag.FlagSet) {
 // to do that automatically for every subcommand.
 flags.BoolVar(&f.clients.matchVersion, FlagMatchBinaryVersion, false, "Require server version to match client version")
 
-// Normalize all flags that are comming from other packages or pre-configurations
+// Normalize all flags that are coming from other packages or pre-configurations
 // a.k.a. change all "_" to "-". e.g. glog package
 flags.SetNormalizeFunc(util.WordSepNormalizeFunc)
 }

@@ -176,7 +176,7 @@ type handlerEntry struct {
 // HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide
 // more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers
 // will only be printed if the object type changes. This makes it useful for printing items
-// recieved from watches.
+// received from watches.
 type HumanReadablePrinter struct {
 handlerMap map[reflect.Type]*handlerEntry
 noHeaders bool
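The behavior described above, printing column headers only when the object type changes between successive PrintObj calls, is what makes the printer usable on a watch stream of mixed objects. A simplified sketch of that idea (not the actual kubectl printer; the handler table, column headers, and object types below are invented):

package main

import (
	"fmt"
	"io"
	"os"
	"reflect"
)

type pod struct{ Name string }
type service struct{ Name string }

// printer remembers the last printed type so headers are emitted only on change.
type printer struct {
	lastType reflect.Type
	headers  map[reflect.Type]string
}

func (p *printer) printObj(obj interface{}, w io.Writer) {
	t := reflect.TypeOf(obj)
	if t != p.lastType {
		fmt.Fprintln(w, p.headers[t])
		p.lastType = t
	}
	fmt.Fprintf(w, "%v\n", obj)
}

func main() {
	p := &printer{headers: map[reflect.Type]string{
		reflect.TypeOf(pod{}):     "POD NAME",
		reflect.TypeOf(service{}): "SERVICE NAME",
	}}
	// Two pods in a row share one header; the service triggers a new one.
	p.printObj(pod{Name: "web-1"}, os.Stdout)
	p.printObj(pod{Name: "web-2"}, os.Stdout)
	p.printObj(service{Name: "web"}, os.Stdout)
}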
@@ -91,7 +91,7 @@ func expectPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate, expected ...kube
 for i := range expected {
 update := <-ch
 sort.Sort(sortedPods(update.Pods))
-// Clear the annotation field before the comparision.
+// Clear the annotation field before the comparison.
 // TODO: consider mock out recordFirstSeen in config.go
 for _, pod := range update.Pods {
 delete(pod.Annotations, kubelet.ConfigFirstSeenAnnotationKey)

@@ -39,7 +39,7 @@ func recordOperation(operation string, start time.Time) {
 metrics.DockerOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
 }
 
-// Record error for metric if an error occured.
+// Record error for metric if an error occurred.
 func recordError(operation string, err error) {
 if err != nil {
 metrics.DockerErrors.WithLabelValues(operation).Inc()

@@ -1298,7 +1298,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
 }
 
 // currently, Docker does not have a flag by which the ndots option can be passed.
-// (A seperate issue has been filed with Docker to add a ndots flag)
+// (A separate issue has been filed with Docker to add a ndots flag)
 // The addNDotsOption call appends the ndots option to the resolv.conf file generated by docker.
 // This resolv.conf file is shared by all containers of the same pod, and needs to be modified only once per pod.
 // we modify it when the pause container is created since it is the first container created in the pod since it holds

@@ -217,7 +217,7 @@ func (im *realImageManager) GarbageCollect() error {
 
 // Tries to free bytesToFree worth of images on the disk.
 //
-// Returns the number of bytes free and an error if any occured. The number of
+// Returns the number of bytes free and an error if any occurred. The number of
 // bytes freed is always returned.
 // Note that error may be nil and the number of bytes free may be less
 // than bytesToFree.

@@ -1701,7 +1701,7 @@ func (kl *Kubelet) handleNotFittingPods(pods []*api.Pod) []*api.Pod {
 // that don't fit on the node, and may reject pods if node is overcommitted.
 func (kl *Kubelet) admitPods(allPods []*api.Pod, podSyncTypes map[types.UID]SyncPodType) []*api.Pod {
 // Pod phase progresses monotonically. Once a pod has reached a final state,
-// it should never leave irregardless of the restart policy. The statuses
+// it should never leave regardless of the restart policy. The statuses
 // of such pods should not be changed, and there is no need to sync them.
 // TODO: the logic here does not handle two cases:
 // 1. If the containers were removed immediately after they died, kubelet

@@ -409,7 +409,7 @@ func TestMountExternalVolumes(t *testing.T) {
 }
 podVolumes, err := kubelet.mountExternalVolumes(&pod)
 if err != nil {
-t.Errorf("Expected sucess: %v", err)
+t.Errorf("Expected success: %v", err)
 }
 expectedPodVolumes := []string{"vol1"}
 if len(expectedPodVolumes) != len(podVolumes) {

@@ -22,7 +22,7 @@ limitations under the License.
 // plugin-name/<other-files>
 // where, 'executable' has the following requirements:
 // - should have exec permissions
-// - should give non-zero exit code on failure, and zero on sucess
+// - should give non-zero exit code on failure, and zero on success
 // - the arguments will be <action> <pod_namespace> <pod_name> <docker_id_of_infra_container>
 // whereupon, <action> will be one of:
 // - init, called when the kubelet loads the plugin
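The contract spelled out above, an executable plugin invoked as <action> <pod_namespace> <pod_name> <docker_id_of_infra_container> that signals failure via a non-zero exit code, can be exercised with nothing more than os/exec. A hedged sketch of what a calling side might look like (the plugin path, callPlugin helper, and argument values are placeholders, not the kubelet's actual invocation code):

package main

import (
	"fmt"
	"os/exec"
)

// callPlugin runs a network plugin executable with the documented argument
// order and maps a non-zero exit code to an error.
func callPlugin(executable, action, podNamespace, podName, infraContainerID string) error {
	cmd := exec.Command(executable, action, podNamespace, podName, infraContainerID)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("plugin %s %s failed: %v, output: %s", executable, action, err, out)
	}
	return nil
}

func main() {
	// Hypothetical plugin path; on a real node this would live under the
	// kubelet's network plugin directory.
	err := callPlugin("/opt/net-plugins/my-plugin/my-plugin", "setup", "default", "web-1", "abc123")
	fmt.Println("setup result:", err)
}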
@@ -636,7 +636,7 @@ func (r *runtime) makeRuntimePod(unitName string, podInfos map[string]*podInfo)
 }
 
 // GetPods runs 'systemctl list-unit' and 'rkt list' to get the list of rkt pods.
-// Then it will use the result to contruct a list of container runtime pods.
+// Then it will use the result to construct a list of container runtime pods.
 // If all is false, then only running pods will be returned, otherwise all pods will be
 // returned.
 func (r *runtime) GetPods(all bool) ([]*kubecontainer.Pod, error) {

@@ -686,11 +686,11 @@ type StatsRequest struct {
 NumStats int `json:"num_stats,omitempty"`
 
 // Start time for which to query information.
-// If ommitted, the beginning of time is assumed.
+// If omitted, the beginning of time is assumed.
 Start time.Time `json:"start,omitempty"`
 
 // End time for which to query information.
-// If ommitted, current time is assumed.
+// If omitted, current time is assumed.
 End time.Time `json:"end,omitempty"`
 
 // Whether to also include information from subcontainers.

@@ -313,7 +313,7 @@ func setDefaults(c *Config) {
 // Public fields:
 // Handler -- The returned master has a field TopHandler which is an
 // http.Handler which handles all the endpoints provided by the master,
-// including the API, the UI, and miscelaneous debugging endpoints. All
+// including the API, the UI, and miscellaneous debugging endpoints. All
 // these are subject to authorization and authentication.
 // InsecureHandler -- an http.Handler which handles all the same
 // endpoints as Handler, but no authorization and authentication is done.

@@ -244,7 +244,7 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol
 socket: sock,
 timeout: timeout,
 sessionAffinityType: api.ServiceAffinityNone, // default
-stickyMaxAgeMinutes: 180, // TODO: paramaterize this in the API.
+stickyMaxAgeMinutes: 180, // TODO: parameterize this in the API.
 }
 proxier.setServiceInfo(service, si)
 

@@ -490,7 +490,7 @@ func (e *Etcd) WatchPredicate(ctx api.Context, m generic.Matcher, resourceVersio
 // if the TTL cannot be calculated. The defaultTTL is changed to 1 if less than zero. Zero means
 // no TTL, not expire immediately.
 func (e *Etcd) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {
-// etcd may return a negative TTL for a node if the expiration has not occured due
+// etcd may return a negative TTL for a node if the expiration has not occurred due
 // to server lag - we will ensure that the value is at least set.
 if defaultTTL < 0 {
 defaultTTL = 1
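The comment fixed above documents a small clamp: a defaultTTL below zero is bumped to 1 because etcd can report a slightly negative TTL when an expiration has not fired yet due to server lag, while zero keeps its special "no TTL" meaning. As a sketch of just that clamping rule (the surrounding calculateTTL logic is omitted; clampTTL is an invented name):

package main

import "fmt"

// clampTTL applies the rule described above: negative values become 1,
// zero stays zero (meaning "no TTL"), positive values pass through.
func clampTTL(defaultTTL int64) uint64 {
	if defaultTTL < 0 {
		defaultTTL = 1
	}
	return uint64(defaultTTL)
}

func main() {
	fmt.Println(clampTTL(-3)) // 1: lagging expiration reported as negative
	fmt.Println(clampTTL(0))  // 0: no TTL
	fmt.Println(clampTTL(30)) // 30: explicit TTL passes through
}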
@@ -40,6 +40,6 @@ type RangeRegistry interface {
 // or an error if the allocation could not be retrieved.
 Get() (*api.RangeAllocation, error)
 // CreateOrUpdate should create or update the provide allocation, unless a conflict
-// has occured since the item was last created.
+// has occurred since the item was last created.
 CreateOrUpdate(*api.RangeAllocation) error
 }

@@ -129,7 +129,7 @@ type SelfLinker interface {
 }
 
 // All api types must support the Object interface. It's deliberately tiny so that this is not an onerous
-// burden. Implement it with a pointer reciever; this will allow us to use the go compiler to check the
+// burden. Implement it with a pointer receiver; this will allow us to use the go compiler to check the
 // one thing about our objects that it's capable of checking for us.
 type Object interface {
 // This function is used only to enforce membership. It's never called.

@@ -83,7 +83,7 @@ func (self *Scheme) embeddedObjectToRawExtension(in *EmbeddedObject, out *RawExt
 return err
 }
 
-// Copy the kind field into the ouput object.
+// Copy the kind field into the output object.
 err = s.Convert(
 &emptyPlugin{PluginBase: PluginBase{Kind: kind}},
 outObj,

@@ -476,7 +476,7 @@ func (h *etcdHelper) getFromCache(index uint64) (runtime.Object, bool) {
 }()
 obj, found := h.cache.Get(index)
 if found {
-// We should not return the object itself to avoid poluting the cache if someone
+// We should not return the object itself to avoid polluting the cache if someone
 // modifies returned values.
 objCopy, err := h.copier.Copy(obj.(runtime.Object))
 if err != nil {

@@ -34,7 +34,7 @@ func IsEtcdNotFound(err error) bool {
 return isEtcdErrorNum(err, tools.EtcdErrorCodeNotFound)
 }
 
-// IsEtcdNodeExist returns true iff err is an etcd node aleady exist error.
+// IsEtcdNodeExist returns true iff err is an etcd node already exist error.
 func IsEtcdNodeExist(err error) bool {
 return isEtcdErrorNum(err, tools.EtcdErrorCodeNodeExist)
 }

@@ -124,10 +124,10 @@ type Interface interface {
 // GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType')
 // retrying the update until success if there is index conflict.
 // Note that object passed to tryUpdate may change acress incovations of tryUpdate() if
-// other writers are simultanously updateing it, to tryUpdate() needs to take into account
+// other writers are simultaneously updateing it, to tryUpdate() needs to take into account
 // the current contents of the object when deciding how the update object should look.
 //
-// Exmaple:
+// Example:
 //
 // s := /* implementation of Interface */
 // err := s.GuaranteedUpdate(
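GuaranteedUpdate, as the corrected comment describes, keeps invoking tryUpdate() until a write lands without an index conflict, which is why tryUpdate must always recompute from the freshest copy of the object. A standalone sketch of that compare-and-swap retry loop (a toy versioned store stands in for etcd here; none of the names below are the real storage Interface):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errConflict = errors.New("index conflict")

// versionedStore is a toy stand-in for etcd: writes succeed only when the
// caller saw the current version.
type versionedStore struct {
	mu      sync.Mutex
	value   int
	version uint64
}

func (s *versionedStore) get() (int, uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.value, s.version
}

func (s *versionedStore) compareAndSwap(newValue int, seenVersion uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if seenVersion != s.version {
		return errConflict
	}
	s.value = newValue
	s.version++
	return nil
}

// guaranteedUpdate keeps calling tryUpdate with the current contents until the
// compare-and-swap write succeeds, mirroring the retry-on-conflict contract above.
func guaranteedUpdate(s *versionedStore, tryUpdate func(current int) int) int {
	for {
		current, version := s.get()
		updated := tryUpdate(current)
		if err := s.compareAndSwap(updated, version); err == nil {
			return updated
		}
		// Another writer got in first; re-read and recompute from the new contents.
	}
}

func main() {
	s := &versionedStore{}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			guaranteedUpdate(s, func(current int) int { return current + 1 })
		}()
	}
	wg.Wait()
	final, _ := s.get()
	fmt.Println("final value:", final) // 10: every increment eventually lands
}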
@@ -7987,7 +7987,7 @@ function marked(src, opt, callback) {
 } catch (e) {
 e.message += '\nPlease report this to https://github.com/chjj/marked.';
 if ((opt || marked.defaults).silent) {
-return '<p>An error occured:</p><pre>'
+return '<p>An error occurred:</p><pre>'
 + escape(e.message + '', true)
 + '</pre>';
 }

@@ -8641,7 +8641,7 @@ var Shred = function(options) {
 this.logCurl = options.logCurl || false;
 };
 
-// Most of the real work is done in the request and reponse classes.
+// Most of the real work is done in the request and response classes.
 
 Shred.Request = require("./shred/request");
 Shred.Response = require("./shred/response");

@@ -9840,7 +9840,7 @@ Changelog:
 a regress. I appologize for that.
 
 2010.05.09 - 0.5:
-- bug fix: 0 is now preceeded with a + sign
+- bug fix: 0 is now preceded with a + sign
 - bug fix: the sign was not at the right position on padded results (Kamal Abdali)
 - switched from GPL to BSD license
 

@@ -46921,7 +46921,7 @@ window.SwaggerUi.Views = {};
 });
 } else if (typeof exports === 'object') {
 // Node. Does not work with strict CommonJS, but
-// only CommonJS-like enviroments that support module.exports,
+// only CommonJS-like environments that support module.exports,
 // like Node.
 module.exports = factory(require('b'));
 } else {

@@ -148,7 +148,7 @@ func NewFieldTooLong(field string, value interface{}, maxLength int) *Validation
 type ValidationErrorList []error
 
 // Prefix adds a prefix to the Field of every ValidationError in the list.
-// Also adds prefixes to multiple fields if you send an or seperator.
+// Also adds prefixes to multiple fields if you send an or separator.
 // Returns the list for convenience.
 func (list ValidationErrorList) Prefix(prefix string) ValidationErrorList {
 for i := range list {