kubeadm: update output of init, join reset commands

- move most output unrelated to phases to klog.V(1)
- rename some prefixes for consistency - e.g.
[kubelet] -> [kubelet-start]
- control-plane-prepare: print details for each generated CP
component manifest.
- uppercase the info text for all "[reset].." lines
- modify the text for one line in reset
pull/564/head
Lubomir I. Ivanov 2019-03-04 04:11:11 +02:00
parent feb0937fa4
commit 6f6b364b9c
14 changed files with 77 additions and 59 deletions

View File

@ -131,8 +131,21 @@ func runControlPlanePrepareManifestsSubphase(c workflow.RunData) error {
return err return err
} }
// Generate missing certificates (if any) fmt.Printf("[control-plane] Using manifest folder %q\n", kubeadmconstants.GetStaticPodDirectory())
return controlplane.CreateInitStaticPodManifestFiles(kubeadmconstants.GetStaticPodDirectory(), cfg)
for _, component := range kubeadmconstants.ControlPlaneComponents {
fmt.Printf("[control-plane] Creating static Pod manifest for %q\n", component)
err := controlplane.CreateStaticPodFiles(
kubeadmconstants.GetStaticPodDirectory(),
&cfg.ClusterConfiguration,
&cfg.LocalAPIEndpoint,
component,
)
if err != nil {
return err
}
}
return nil
} }
func runControlPlanePrepareDownloadCertsPhaseLocal(c workflow.RunData) error { func runControlPlanePrepareDownloadCertsPhaseLocal(c workflow.RunData) error {
@ -178,6 +191,8 @@ func runControlPlanePrepareCertsPhaseLocal(c workflow.RunData) error {
return err return err
} }
fmt.Printf("[certs] Using certificateDir folder %q\n", cfg.CertificatesDir)
// Generate missing certificates (if any) // Generate missing certificates (if any)
return certsphase.CreatePKIAssets(cfg) return certsphase.CreatePKIAssets(cfg)
} }
@ -198,7 +213,8 @@ func runControlPlanePrepareKubeconfigPhaseLocal(c workflow.RunData) error {
return err return err
} }
fmt.Println("[control-plane-prepare] Generating kubeconfig files") fmt.Println("[kubeconfig] Generating kubeconfig files")
fmt.Printf("[kubeconfig] Using kubeconfig folder %q\n", kubeadmconstants.KubernetesDir)
// Generate kubeconfig files for controller manager, scheduler and for the admin/kubeadm itself // Generate kubeconfig files for controller manager, scheduler and for the admin/kubeadm itself
// NB. The kubeconfig file for kubelet will be generated by the TLS bootstrap process in // NB. The kubeconfig file for kubelet will be generated by the TLS bootstrap process in

View File

@ -66,19 +66,19 @@ func NewCmdReset(in io.Reader, out io.Writer) *cobra.Command {
var cfg *kubeadmapi.InitConfiguration var cfg *kubeadmapi.InitConfiguration
client, err = getClientset(kubeConfigFile, false) client, err = getClientset(kubeConfigFile, false)
if err == nil { if err == nil {
klog.V(1).Infof("[reset] loaded client set from kubeconfig file: %s", kubeConfigFile) klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", kubeConfigFile)
cfg, err = configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false) cfg, err = configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false)
if err != nil { if err != nil {
klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err) klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err)
} }
} else { } else {
klog.V(1).Infof("[reset] could not get client set from missing kubeconfig file: %s", kubeConfigFile) klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", kubeConfigFile)
} }
if criSocketPath == "" { if criSocketPath == "" {
criSocketPath, err = resetDetectCRISocket(cfg) criSocketPath, err = resetDetectCRISocket(cfg)
kubeadmutil.CheckErr(err) kubeadmutil.CheckErr(err)
klog.V(1).Infof("[reset] detected and using CRI socket: %s", criSocketPath) klog.V(1).Infof("[reset] Detected and using CRI socket: %s", criSocketPath)
} }
r, err := NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath) r, err := NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath)
@ -114,8 +114,8 @@ type Reset struct {
// NewReset instantiate Reset struct // NewReset instantiate Reset struct
func NewReset(in io.Reader, ignorePreflightErrors sets.String, forceReset bool, certsDir, criSocketPath string) (*Reset, error) { func NewReset(in io.Reader, ignorePreflightErrors sets.String, forceReset bool, certsDir, criSocketPath string) (*Reset, error) {
if !forceReset { if !forceReset {
fmt.Println("[reset] WARNING: changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.") fmt.Println("[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.")
fmt.Print("[reset] are you sure you want to proceed? [y/N]: ") fmt.Print("[reset] Are you sure you want to proceed? [y/N]: ")
s := bufio.NewScanner(in) s := bufio.NewScanner(in)
s.Scan() s.Scan()
if err := s.Err(); err != nil { if err := s.Err(); err != nil {
@ -126,7 +126,7 @@ func NewReset(in io.Reader, ignorePreflightErrors sets.String, forceReset bool,
} }
} }
fmt.Println("[preflight] running pre-flight checks") fmt.Println("[preflight] Running pre-flight checks")
if err := preflight.RunRootCheckOnly(ignorePreflightErrors); err != nil { if err := preflight.RunRootCheckOnly(ignorePreflightErrors); err != nil {
return nil, err return nil, err
} }
@ -143,7 +143,7 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.I
// Only clear etcd data when using local etcd. // Only clear etcd data when using local etcd.
etcdManifestPath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName, "etcd.yaml") etcdManifestPath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName, "etcd.yaml")
klog.V(1).Infoln("[reset] checking for etcd config") klog.V(1).Infoln("[reset] Checking for etcd config")
etcdDataDir, err := getEtcdDataDir(etcdManifestPath, cfg) etcdDataDir, err := getEtcdDataDir(etcdManifestPath, cfg)
if err == nil { if err == nil {
dirsToClean = append(dirsToClean, etcdDataDir) dirsToClean = append(dirsToClean, etcdDataDir)
@ -153,21 +153,21 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.I
} }
} }
} else { } else {
fmt.Println("[reset] no etcd config found. Assuming external etcd") fmt.Println("[reset] No etcd config found. Assuming external etcd")
fmt.Println("[reset] please manually reset etcd to prevent further issues") fmt.Println("[reset] Please manually reset etcd to prevent further issues")
} }
// Try to stop the kubelet service // Try to stop the kubelet service
klog.V(1).Infoln("[reset] getting init system") klog.V(1).Infoln("[reset] Getting init system")
initSystem, err := initsystem.GetInitSystem() initSystem, err := initsystem.GetInitSystem()
if err != nil { if err != nil {
klog.Warningln("[reset] the kubelet service could not be stopped by kubeadm. Unable to detect a supported init system!") klog.Warningln("[reset] The kubelet service could not be stopped by kubeadm. Unable to detect a supported init system!")
klog.Warningln("[reset] please ensure kubelet is stopped manually") klog.Warningln("[reset] Please ensure kubelet is stopped manually")
} else { } else {
fmt.Println("[reset] stopping the kubelet service") fmt.Println("[reset] Stopping the kubelet service")
if err := initSystem.ServiceStop("kubelet"); err != nil { if err := initSystem.ServiceStop("kubelet"); err != nil {
klog.Warningf("[reset] the kubelet service could not be stopped by kubeadm: [%v]\n", err) klog.Warningf("[reset] The kubelet service could not be stopped by kubeadm: [%v]\n", err)
klog.Warningln("[reset] please ensure kubelet is stopped manually") klog.Warningln("[reset] Please ensure kubelet is stopped manually")
} }
} }
@ -175,30 +175,30 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.I
fmt.Printf("[reset] unmounting mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory) fmt.Printf("[reset] unmounting mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory)
umountDirsCmd := fmt.Sprintf("awk '$2 ~ path {print $2}' path=%s/ /proc/mounts | xargs -r umount", kubeadmconstants.KubeletRunDirectory) umountDirsCmd := fmt.Sprintf("awk '$2 ~ path {print $2}' path=%s/ /proc/mounts | xargs -r umount", kubeadmconstants.KubeletRunDirectory)
klog.V(1).Infof("[reset] executing command %q", umountDirsCmd) klog.V(1).Infof("[reset] Executing command %q", umountDirsCmd)
umountOutputBytes, err := exec.Command("sh", "-c", umountDirsCmd).Output() umountOutputBytes, err := exec.Command("sh", "-c", umountDirsCmd).Output()
if err != nil { if err != nil {
klog.Errorf("[reset] failed to unmount mounted directories in %s: %s\n", kubeadmconstants.KubeletRunDirectory, string(umountOutputBytes)) klog.Errorf("[reset] Failed to unmount mounted directories in %s: %s\n", kubeadmconstants.KubeletRunDirectory, string(umountOutputBytes))
} }
klog.V(1).Info("[reset] removing Kubernetes-managed containers") klog.V(1).Info("[reset] Removing Kubernetes-managed containers")
if err := removeContainers(utilsexec.New(), r.criSocketPath); err != nil { if err := removeContainers(utilsexec.New(), r.criSocketPath); err != nil {
klog.Errorf("[reset] failed to remove containers: %v", err) klog.Errorf("[reset] Failed to remove containers: %v", err)
} }
dirsToClean = append(dirsToClean, []string{kubeadmconstants.KubeletRunDirectory, "/etc/cni/net.d", "/var/lib/dockershim", "/var/run/kubernetes"}...) dirsToClean = append(dirsToClean, []string{kubeadmconstants.KubeletRunDirectory, "/etc/cni/net.d", "/var/lib/dockershim", "/var/run/kubernetes"}...)
// Then clean contents from the stateful kubelet, etcd and cni directories // Then clean contents from the stateful kubelet, etcd and cni directories
fmt.Printf("[reset] deleting contents of stateful directories: %v\n", dirsToClean) fmt.Printf("[reset] Deleting contents of stateful directories: %v\n", dirsToClean)
for _, dir := range dirsToClean { for _, dir := range dirsToClean {
klog.V(1).Infof("[reset] deleting content of %s", dir) klog.V(1).Infof("[reset] Deleting content of %s", dir)
cleanDir(dir) cleanDir(dir)
} }
// Remove contents from the config and pki directories // Remove contents from the config and pki directories
klog.V(1).Infoln("[reset] removing contents from the config and pki directories") klog.V(1).Infoln("[reset] Removing contents from the config and pki directories")
if r.certsDir != kubeadmapiv1beta1.DefaultCertificatesDir { if r.certsDir != kubeadmapiv1beta1.DefaultCertificatesDir {
klog.Warningf("[reset] WARNING: cleaning a non-default certificates directory: %q\n", r.certsDir) klog.Warningf("[reset] WARNING: Cleaning a non-default certificates directory: %q\n", r.certsDir)
} }
resetConfigDir(kubeadmconstants.KubernetesDir, r.certsDir) resetConfigDir(kubeadmconstants.KubernetesDir, r.certsDir)
@ -287,10 +287,10 @@ func resetConfigDir(configPathDir, pkiPathDir string) {
filepath.Join(configPathDir, kubeadmconstants.ManifestsSubDirName), filepath.Join(configPathDir, kubeadmconstants.ManifestsSubDirName),
pkiPathDir, pkiPathDir,
} }
fmt.Printf("[reset] deleting contents of config directories: %v\n", dirsToClean) fmt.Printf("[reset] Deleting contents of config directories: %v\n", dirsToClean)
for _, dir := range dirsToClean { for _, dir := range dirsToClean {
if err := cleanDir(dir); err != nil { if err := cleanDir(dir); err != nil {
klog.Errorf("[reset] failed to remove directory: %q [%v]\n", dir, err) klog.Errorf("[reset] Failed to remove directory: %q [%v]\n", dir, err)
} }
} }
@ -301,10 +301,10 @@ func resetConfigDir(configPathDir, pkiPathDir string) {
filepath.Join(configPathDir, kubeadmconstants.ControllerManagerKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.ControllerManagerKubeConfigFileName),
filepath.Join(configPathDir, kubeadmconstants.SchedulerKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.SchedulerKubeConfigFileName),
} }
fmt.Printf("[reset] deleting files: %v\n", filesToClean) fmt.Printf("[reset] Deleting files: %v\n", filesToClean)
for _, path := range filesToClean { for _, path := range filesToClean {
if err := os.RemoveAll(path); err != nil { if err := os.RemoveAll(path); err != nil {
klog.Errorf("[reset] failed to remove file: %q [%v]\n", path, err) klog.Errorf("[reset] Failed to remove file: %q [%v]\n", path, err)
} }
} }
} }

View File

@ -20,6 +20,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
], ],
) )

View File

@ -17,7 +17,6 @@ limitations under the License.
package file package file
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -29,6 +28,7 @@ import (
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api" clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
bootstrapapi "k8s.io/cluster-bootstrap/token/api" bootstrapapi "k8s.io/cluster-bootstrap/token/api"
"k8s.io/klog"
"k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
) )
@ -104,7 +104,7 @@ func ValidateConfigInfo(config *clientcmdapi.Config, clustername string) (*clien
return nil, err return nil, err
} }
fmt.Printf("[discovery] Created cluster-info discovery client, requesting info from %q\n", defaultCluster.Server) klog.V(1).Infof("[discovery] Created cluster-info discovery client, requesting info from %q\n", defaultCluster.Server)
var clusterinfoCM *v1.ConfigMap var clusterinfoCM *v1.ConfigMap
wait.PollInfinite(constants.DiscoveryRetryInterval, func() (bool, error) { wait.PollInfinite(constants.DiscoveryRetryInterval, func() (bool, error) {
@ -114,10 +114,10 @@ func ValidateConfigInfo(config *clientcmdapi.Config, clustername string) (*clien
if apierrors.IsForbidden(err) { if apierrors.IsForbidden(err) {
// If the request is unauthorized, the cluster admin has not granted access to the cluster info configmap for unauthenticated users // If the request is unauthorized, the cluster admin has not granted access to the cluster info configmap for unauthenticated users
// In that case, trust the cluster admin and do not refresh the cluster-info credentials // In that case, trust the cluster admin and do not refresh the cluster-info credentials
fmt.Printf("[discovery] Could not access the %s ConfigMap for refreshing the cluster-info information, but the TLS cert is valid so proceeding...\n", bootstrapapi.ConfigMapClusterInfo) klog.Warningf("[discovery] Could not access the %s ConfigMap for refreshing the cluster-info information, but the TLS cert is valid so proceeding...\n", bootstrapapi.ConfigMapClusterInfo)
return true, nil return true, nil
} }
fmt.Printf("[discovery] Failed to validate the API Server's identity, will try again: [%v]\n", err) klog.V(1).Infof("[discovery] Failed to validate the API Server's identity, will try again: [%v]\n", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -131,11 +131,11 @@ func ValidateConfigInfo(config *clientcmdapi.Config, clustername string) (*clien
// We somehow got hold of the ConfigMap, try to read some data from it. If we can't, fallback on the user-provided file // We somehow got hold of the ConfigMap, try to read some data from it. If we can't, fallback on the user-provided file
refreshedBaseKubeConfig, err := tryParseClusterInfoFromConfigMap(clusterinfoCM) refreshedBaseKubeConfig, err := tryParseClusterInfoFromConfigMap(clusterinfoCM)
if err != nil { if err != nil {
fmt.Printf("[discovery] The %s ConfigMap isn't set up properly (%v), but the TLS cert is valid so proceeding...\n", bootstrapapi.ConfigMapClusterInfo, err) klog.V(1).Infof("[discovery] The %s ConfigMap isn't set up properly (%v), but the TLS cert is valid so proceeding...\n", bootstrapapi.ConfigMapClusterInfo, err)
return kubeconfig, nil return kubeconfig, nil
} }
fmt.Println("[discovery] Synced cluster-info information from the API Server so we have got the latest information") klog.V(1).Infoln("[discovery] Synced cluster-info information from the API Server so we have got the latest information")
// In an HA world in the future, this will make more sense, because now we've got new information, possibly about new API Servers to talk to // In an HA world in the future, this will make more sense, because now we've got new information, possibly about new API Servers to talk to
return refreshedBaseKubeConfig, nil return refreshedBaseKubeConfig, nil
} }

View File

@ -24,6 +24,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
], ],
) )

View File

@ -32,6 +32,7 @@ import (
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api" clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
bootstrapapi "k8s.io/cluster-bootstrap/token/api" bootstrapapi "k8s.io/cluster-bootstrap/token/api"
"k8s.io/klog"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
"k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -71,7 +72,7 @@ func RetrieveValidatedConfigInfo(cfg *kubeadmapi.JoinConfiguration) (*clientcmda
return nil, err return nil, err
} }
fmt.Printf("[discovery] Created cluster-info discovery client, requesting info from %q\n", insecureBootstrapConfig.Clusters[clusterName].Server) klog.V(1).Infof("[discovery] Created cluster-info discovery client, requesting info from %q\n", insecureBootstrapConfig.Clusters[clusterName].Server)
// Make an initial insecure connection to get the cluster-info ConfigMap // Make an initial insecure connection to get the cluster-info ConfigMap
var insecureClusterInfo *v1.ConfigMap var insecureClusterInfo *v1.ConfigMap
@ -79,7 +80,7 @@ func RetrieveValidatedConfigInfo(cfg *kubeadmapi.JoinConfiguration) (*clientcmda
var err error var err error
insecureClusterInfo, err = insecureClient.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) insecureClusterInfo, err = insecureClient.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil { if err != nil {
fmt.Printf("[discovery] Failed to request cluster info, will try again: [%s]\n", err) klog.V(1).Infof("[discovery] Failed to request cluster info, will try again: [%s]\n", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -106,7 +107,7 @@ func RetrieveValidatedConfigInfo(cfg *kubeadmapi.JoinConfiguration) (*clientcmda
// If no TLS root CA pinning was specified, we're done // If no TLS root CA pinning was specified, we're done
if pubKeyPins.Empty() { if pubKeyPins.Empty() {
fmt.Printf("[discovery] Cluster info signature and contents are valid and no TLS pinning was specified, will use API Server %q\n", endpoint) klog.V(1).Infof("[discovery] Cluster info signature and contents are valid and no TLS pinning was specified, will use API Server %q\n", endpoint)
return insecureConfig, nil return insecureConfig, nil
} }
@ -137,13 +138,13 @@ func RetrieveValidatedConfigInfo(cfg *kubeadmapi.JoinConfiguration) (*clientcmda
return nil, err return nil, err
} }
fmt.Printf("[discovery] Requesting info from %q again to validate TLS against the pinned public key\n", insecureBootstrapConfig.Clusters[clusterName].Server) klog.V(1).Infof("[discovery] Requesting info from %q again to validate TLS against the pinned public key\n", insecureBootstrapConfig.Clusters[clusterName].Server)
var secureClusterInfo *v1.ConfigMap var secureClusterInfo *v1.ConfigMap
wait.PollImmediateInfinite(constants.DiscoveryRetryInterval, func() (bool, error) { wait.PollImmediateInfinite(constants.DiscoveryRetryInterval, func() (bool, error) {
var err error var err error
secureClusterInfo, err = secureClient.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) secureClusterInfo, err = secureClient.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil { if err != nil {
fmt.Printf("[discovery] Failed to request cluster info, will try again: [%s]\n", err) klog.V(1).Infof("[discovery] Failed to request cluster info, will try again: [%s]\n", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -160,7 +161,7 @@ func RetrieveValidatedConfigInfo(cfg *kubeadmapi.JoinConfiguration) (*clientcmda
return nil, errors.Wrapf(err, "couldn't parse the kubeconfig file in the %s configmap", bootstrapapi.ConfigMapClusterInfo) return nil, errors.Wrapf(err, "couldn't parse the kubeconfig file in the %s configmap", bootstrapapi.ConfigMapClusterInfo)
} }
fmt.Printf("[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server %q\n", endpoint) klog.V(1).Infof("[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server %q\n", endpoint)
return secureKubeconfig, nil return secureKubeconfig, nil
}) })
if err != nil { if err != nil {
@ -196,13 +197,13 @@ func fetchKubeConfigWithTimeout(apiEndpoint string, discoveryTimeout time.Durati
go func() { go func() {
defer wg.Done() defer wg.Done()
wait.Until(func() { wait.Until(func() {
fmt.Printf("[discovery] Trying to connect to API Server %q\n", apiEndpoint) klog.V(1).Infof("[discovery] Trying to connect to API Server %q\n", apiEndpoint)
cfg, err := fetchKubeConfigFunc(apiEndpoint) cfg, err := fetchKubeConfigFunc(apiEndpoint)
if err != nil { if err != nil {
fmt.Printf("[discovery] Failed to connect to API Server %q: %v\n", apiEndpoint, err) klog.V(1).Infof("[discovery] Failed to connect to API Server %q: %v\n", apiEndpoint, err)
return return
} }
fmt.Printf("[discovery] Successfully established connection with API Server %q\n", apiEndpoint) klog.V(1).Infof("[discovery] Successfully established connection with API Server %q\n", apiEndpoint)
once.Do(func() { once.Do(func() {
resultingKubeConfig = cfg resultingKubeConfig = cfg
close(stopChan) close(stopChan)
@ -216,7 +217,7 @@ func fetchKubeConfigWithTimeout(apiEndpoint string, discoveryTimeout time.Durati
close(stopChan) close(stopChan)
}) })
err := errors.Errorf("abort connecting to API servers after timeout of %v", discoveryTimeout) err := errors.Errorf("abort connecting to API servers after timeout of %v", discoveryTimeout)
fmt.Printf("[discovery] %v\n", err) klog.V(1).Infof("[discovery] %v\n", err)
wg.Wait() wg.Wait()
return nil, err return nil, err
case <-stopChan: case <-stopChan:

View File

@ -57,7 +57,7 @@ func CreatePKIAssets(cfg *kubeadmapi.InitConfiguration) error {
return errors.Wrap(err, "error creating PKI assets") return errors.Wrap(err, "error creating PKI assets")
} }
fmt.Printf("[certs] valid certificates and keys now exist in %q\n", cfg.CertificatesDir) fmt.Printf("[certs] Valid certificates and keys now exist in %q\n", cfg.CertificatesDir)
// Service accounts are not x509 certs, so handled separately // Service accounts are not x509 certs, so handled separately
return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir) return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir)

View File

@ -63,7 +63,7 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, nodeName string, c
// CheckLocalEtcdClusterStatus verifies health state of local/stacked etcd cluster before installing a new etcd member // CheckLocalEtcdClusterStatus verifies health state of local/stacked etcd cluster before installing a new etcd member
func CheckLocalEtcdClusterStatus(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) error { func CheckLocalEtcdClusterStatus(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) error {
fmt.Println("[etcd] Checking etcd cluster health") klog.V(1).Info("[etcd] Checking etcd cluster health")
// creates an etcd client that connects to all the local/stacked etcd members // creates an etcd client that connects to all the local/stacked etcd members
klog.V(1).Info("creating etcd client that connects to etcd pods") klog.V(1).Info("creating etcd client that connects to etcd pods")

View File

@ -127,7 +127,7 @@ func DownloadConfig(client clientset.Interface, kubeletVersion *version.Version,
// Download the ConfigMap from the cluster based on what version the kubelet is // Download the ConfigMap from the cluster based on what version the kubelet is
configMapName := kubeadmconstants.GetKubeletConfigMapName(kubeletVersion) configMapName := kubeadmconstants.GetKubeletConfigMapName(kubeletVersion)
fmt.Printf("[kubelet] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace\n", fmt.Printf("[kubelet-start] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace\n",
configMapName, metav1.NamespaceSystem) configMapName, metav1.NamespaceSystem)
kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})

View File

@ -10,6 +10,7 @@ go_library(
"//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
], ],
) )

View File

@ -17,10 +17,9 @@ limitations under the License.
package patchnode package patchnode
import ( import (
"fmt"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
) )
@ -28,7 +27,7 @@ import (
// AnnotateCRISocket annotates the node with the given crisocket // AnnotateCRISocket annotates the node with the given crisocket
func AnnotateCRISocket(client clientset.Interface, nodeName string, criSocket string) error { func AnnotateCRISocket(client clientset.Interface, nodeName string, criSocket string) error {
fmt.Printf("[patchnode] Uploading the CRI Socket information %q to the Node API object %q as an annotation\n", criSocket, nodeName) klog.V(1).Infof("[patchnode] Uploading the CRI Socket information %q to the Node API object %q as an annotation\n", criSocket, nodeName)
return apiclient.PatchNode(client, nodeName, func(n *v1.Node) { return apiclient.PatchNode(client, nodeName, func(n *v1.Node) {
annotateNodeWithCRISocket(n, criSocket) annotateNodeWithCRISocket(n, criSocket)

View File

@ -39,7 +39,7 @@ const (
// UploadConfiguration saves the InitConfiguration used for later reference (when upgrading for instance) // UploadConfiguration saves the InitConfiguration used for later reference (when upgrading for instance)
func UploadConfiguration(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) error { func UploadConfiguration(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) error {
fmt.Printf("[uploadconfig] storing the configuration used in ConfigMap %q in the %q Namespace\n", kubeadmconstants.KubeadmConfigConfigMap, metav1.NamespaceSystem) fmt.Printf("[upload-config] storing the configuration used in ConfigMap %q in the %q Namespace\n", kubeadmconstants.KubeadmConfigConfigMap, metav1.NamespaceSystem)
// Prepare the ClusterConfiguration for upload // Prepare the ClusterConfiguration for upload
// The components store their config in their own ConfigMaps, then reset the .ComponentConfig struct; // The components store their config in their own ConfigMaps, then reset the .ComponentConfig struct;

View File

@ -234,7 +234,7 @@ func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1
if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil { if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil {
// TODO also check for timeouts // TODO also check for timeouts
if apierrors.IsConflict(err) { if apierrors.IsConflict(err) {
fmt.Println("[patchnode] Temporarily unable to update node metadata due to conflict (will retry)") fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)")
return false, nil return false, nil
} }
return false, errors.Wrapf(err, "error patching node %q through apiserver", n.Name) return false, errors.Wrapf(err, "error patching node %q through apiserver", n.Name)

View File

@ -19,7 +19,6 @@ package etcd
import ( import (
"context" "context"
"crypto/tls" "crypto/tls"
"fmt"
"net" "net"
"net/url" "net/url"
"path/filepath" "path/filepath"
@ -322,17 +321,17 @@ func (c *Client) GetClusterStatus() (map[string]*clientv3.StatusResponse, error)
func (c *Client) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) { func (c *Client) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) {
for i := 0; i < retries; i++ { for i := 0; i < retries; i++ {
if i > 0 { if i > 0 {
fmt.Printf("[util/etcd] Waiting %v until next retry\n", retryInterval) klog.V(1).Infof("[etcd] Waiting %v until next retry\n", retryInterval)
time.Sleep(retryInterval) time.Sleep(retryInterval)
} }
klog.V(2).Infof("attempting to see if all cluster endpoints (%s) are available %d/%d", c.Endpoints, i+1, retries) klog.V(2).Infof("[etcd] attempting to see if all cluster endpoints (%s) are available %d/%d", c.Endpoints, i+1, retries)
resp, err := c.ClusterAvailable() resp, err := c.ClusterAvailable()
if err != nil { if err != nil {
switch err { switch err {
case context.DeadlineExceeded: case context.DeadlineExceeded:
fmt.Println("[util/etcd] Attempt timed out") klog.V(1).Infof("[etcd] Attempt timed out")
default: default:
fmt.Printf("[util/etcd] Attempt failed with error: %v\n", err) klog.V(1).Infof("[etcd] Attempt failed with error: %v\n", err)
} }
continue continue
} }