mirror of https://github.com/k3s-io/k3s
bug: fix segfault when EnableServiceLinks is nil
When upgrading to 1.13, pods that were created prior to the upgrade have no pod.Spec.EnableServiceLinks set. Dereferencing the unset field causes a segfault and prevents the pod from ever starting. Check the field and fall back to the default when it is nil. Fixes #71749
parent 8f1082c6af
commit e9f1700512
@@ -533,6 +533,10 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[
 // Make the environment variables for a pod in the given namespace.
 func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
     var result []kubecontainer.EnvVar
+    enableServiceLinks := v1.DefaultEnableServiceLinks
+    if pod.Spec.EnableServiceLinks != nil {
+        enableServiceLinks = *pod.Spec.EnableServiceLinks
+    }
     // Note: These are added to the docker Config, but are not included in the checksum computed
     // by kubecontainer.HashContainer(...). That way, we can still determine whether an
     // v1.Container is already running by its hash. (We don't want to restart a container just
@@ -542,7 +546,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
     // To avoid this users can: (1) wait between starting a service and starting; or (2) detect
     // missing service env var and exit and be restarted; or (3) use DNS instead of env vars
     // and keep trying to resolve the DNS name of the service (recommended).
-    serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks)
+    serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, enableServiceLinks)
     if err != nil {
         return result, err
     }
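For context outside the kubelet source, the sketch below illustrates the same nil-guard pattern in isolation: start from the default and dereference the optional pointer only after checking it. The PodSpec struct, defaultEnableServiceLinks constant, and resolveEnableServiceLinks helper are simplified stand-ins invented for this example, not the real k8s.io/api/core/v1 types or the kubelet code.

package main

import "fmt"

// PodSpec is a simplified stand-in for the real k8s.io/api/core/v1 PodSpec
// (assumption for illustration only).
type PodSpec struct {
    // EnableServiceLinks is optional, so it is a *bool; objects created
    // before the field existed carry a nil pointer here.
    EnableServiceLinks *bool
}

// defaultEnableServiceLinks mirrors v1.DefaultEnableServiceLinks (true).
const defaultEnableServiceLinks = true

// resolveEnableServiceLinks applies the same guard as the fix: take the
// default and dereference the pointer only after checking it is non-nil.
func resolveEnableServiceLinks(spec *PodSpec) bool {
    enableServiceLinks := defaultEnableServiceLinks
    if spec.EnableServiceLinks != nil {
        enableServiceLinks = *spec.EnableServiceLinks
    }
    return enableServiceLinks
}

func main() {
    // Pod created before the upgrade: the field was never set, so
    // dereferencing it directly would panic with a nil pointer dereference.
    oldPod := &PodSpec{}
    fmt.Println(resolveEnableServiceLinks(oldPod)) // true (default)

    // Pod that explicitly disables service links.
    disabled := false
    newPod := &PodSpec{EnableServiceLinks: &disabled}
    fmt.Println(resolveEnableServiceLinks(newPod)) // false
}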