diff --git a/cmd/hyperkube/kubelet.go b/cmd/hyperkube/kubelet.go index 518692ba4d..1b2b65edeb 100644 --- a/cmd/hyperkube/kubelet.go +++ b/cmd/hyperkube/kubelet.go @@ -23,8 +23,12 @@ import ( // NewKubelet creates a new hyperkube Server object that includes the // description and flags. -func NewKubelet() *Server { - s := options.NewKubeletServer() +func NewKubelet() (*Server, error) { + s, err := options.NewKubeletServer() + if err != nil { + return nil, err + } + hks := Server{ name: "kubelet", SimpleUsage: "kubelet", @@ -39,5 +43,5 @@ func NewKubelet() *Server { }, } s.AddFlags(hks.Flags()) - return &hks + return &hks, nil } diff --git a/cmd/hyperkube/main.go b/cmd/hyperkube/main.go index e1e07b7d16..ef49d06750 100644 --- a/cmd/hyperkube/main.go +++ b/cmd/hyperkube/main.go @@ -20,6 +20,7 @@ limitations under the License. package main import ( + "fmt" "os" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration @@ -36,7 +37,12 @@ func main() { hk.AddServer(NewKubeAPIServer()) hk.AddServer(NewKubeControllerManager()) hk.AddServer(NewScheduler()) - hk.AddServer(NewKubelet()) + if kubelet, err := NewKubelet(); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } else { + hk.AddServer(kubelet) + } hk.AddServer(NewKubeProxy()) hk.AddServer(NewKubeAggregator()) diff --git a/cmd/kubelet/BUILD b/cmd/kubelet/BUILD index 6995ac589d..7f83f8fced 100644 --- a/cmd/kubelet/BUILD +++ b/cmd/kubelet/BUILD @@ -23,10 +23,14 @@ go_library( deps = [ "//cmd/kubelet/app:go_default_library", "//cmd/kubelet/app/options:go_default_library", + "//pkg/apis/componentconfig:go_default_library", "//pkg/client/metrics/prometheus:go_default_library", + "//pkg/features:go_default_library", + "//pkg/kubelet/kubeletconfig:go_default_library", "//pkg/version/prometheus:go_default_library", "//pkg/version/verflag:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index 550d8cb7b4..77403348bc 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -52,6 +52,7 @@ go_library( "//pkg/kubelet/dockershim/remote:go_default_library", "//pkg/kubelet/eviction:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", + "//pkg/kubelet/kubeletconfig:go_default_library", "//pkg/kubelet/network:go_default_library", "//pkg/kubelet/network/cni:go_default_library", "//pkg/kubelet/network/kubenet:go_default_library", @@ -100,8 +101,6 @@ go_library( "//vendor/golang.org/x/exp/inotify:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/cmd/kubelet/app/options/BUILD b/cmd/kubelet/app/options/BUILD index 7a94cb918c..215b5f0d42 100644 --- a/cmd/kubelet/app/options/BUILD +++ b/cmd/kubelet/app/options/BUILD @@ -19,6 +19,8 @@ go_library( "//pkg/apis/componentconfig:go_default_library", "//pkg/apis/componentconfig/install:go_default_library", 
"//pkg/apis/componentconfig/v1alpha1:go_default_library", + "//pkg/apis/componentconfig/validation:go_default_library", + "//pkg/features:go_default_library", "//pkg/util/taints:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index e43f07147f..f5e428a529 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -18,6 +18,7 @@ limitations under the License. package options import ( + "fmt" _ "net/http/pprof" "strings" @@ -26,9 +27,10 @@ import ( utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/componentconfig" - // Need to make sure the componentconfig api is installed so defaulting funcs work - _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" // Need to make sure the componentconfig api is installed so defaulting funcs work "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" + componentconfigvalidation "k8s.io/kubernetes/pkg/apis/componentconfig/validation" + "k8s.io/kubernetes/pkg/features" utiltaints "k8s.io/kubernetes/pkg/util/taints" "github.com/spf13/pflag" @@ -79,6 +81,72 @@ type KubeletFlags struct { // Container-runtime-specific options. ContainerRuntimeOptions + + // certDirectory is the directory where the TLS certs are located (by + // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile + // are provided, this flag will be ignored. + CertDirectory string + + // cloudProvider is the provider for cloud services. + // +optional + CloudProvider string + + // cloudConfigFile is the path to the cloud provider configuration file. + // +optional + CloudConfigFile string + + // rootDirectory is the directory path to place kubelet files (volume + // mounts,etc). + RootDirectory string + + // The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. + // The Kubelet will create this directory if it does not already exist. + // The path may be absolute or relative; relative paths are under the Kubelet's current working directory. + // Providing this flag enables dynamic kubelet configuration. + // To use this flag, the DynamicKubeletConfig feature gate must be enabled. + DynamicConfigDir flag.StringFlag + + // The Kubelet will look in this directory for an init configuration. + // The path may be absolute or relative; relative paths are under the Kubelet's current working directory. + // Omit this flag to use the combination of built-in default configuration values and flags. + // To use this flag, the DynamicKubeletConfig feature gate must be enabled. + InitConfigDir flag.StringFlag +} + +// NewKubeletFlags will create a new KubeletFlags with default values +func NewKubeletFlags() *KubeletFlags { + return &KubeletFlags{ + // TODO(#41161:v1.10.0): Remove the default kubeconfig path and --require-kubeconfig. 
+ RequireKubeConfig: false, + KubeConfig: flag.NewStringFlag("/var/lib/kubelet/kubeconfig"), + ContainerRuntimeOptions: *NewContainerRuntimeOptions(), + CertDirectory: "/var/run/kubernetes", + CloudProvider: v1alpha1.AutoDetectCloudProvider, + RootDirectory: v1alpha1.DefaultRootDir, + } +} + +func ValidateKubeletFlags(f *KubeletFlags) error { + // ensure that nobody sets DynamicConfigDir if the dynamic config feature gate is turned off + if f.DynamicConfigDir.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + return fmt.Errorf("the DynamicKubeletConfig feature gate must be enabled in order to use the --dynamic-config-dir flag") + } + // ensure that nobody sets InitConfigDir if the dynamic config feature gate is turned off + if f.InitConfigDir.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + return fmt.Errorf("the DynamicKubeletConfig feature gate must be enabled in order to use the --init-config-dir flag") + } + return nil +} + +// NewKubeletConfiguration will create a new KubeletConfiguration with default values +func NewKubeletConfiguration() (*componentconfig.KubeletConfiguration, error) { + versioned := &v1alpha1.KubeletConfiguration{} + api.Scheme.Default(versioned) + config := &componentconfig.KubeletConfiguration{} + if err := api.Scheme.Convert(versioned, config, nil); err != nil { + return nil, err + } + return config, nil } // KubeletServer encapsulates all of the parameters necessary for starting up @@ -89,29 +157,33 @@ type KubeletServer struct { } // NewKubeletServer will create a new KubeletServer with default values. -func NewKubeletServer() *KubeletServer { - versioned := &v1alpha1.KubeletConfiguration{} - api.Scheme.Default(versioned) - config := componentconfig.KubeletConfiguration{} - api.Scheme.Convert(versioned, &config, nil) - return &KubeletServer{ - KubeletFlags: KubeletFlags{ - // TODO(#41161:v1.10.0): Remove the default kubeconfig path and --require-kubeconfig. - RequireKubeConfig: false, - KubeConfig: flag.NewStringFlag("/var/lib/kubelet/kubeconfig"), - ContainerRuntimeOptions: *NewContainerRuntimeOptions(), - }, - KubeletConfiguration: config, +func NewKubeletServer() (*KubeletServer, error) { + config, err := NewKubeletConfiguration() + if err != nil { + return nil, err } + return &KubeletServer{ + KubeletFlags: *NewKubeletFlags(), + KubeletConfiguration: *config, + }, nil } -type kubeletConfiguration componentconfig.KubeletConfiguration +// validateKubeletServer validates configuration of KubeletServer and returns an error if the input configuration is invalid +func ValidateKubeletServer(s *KubeletServer) error { + // please add any KubeletConfiguration validation to the componentconfigvalidation.ValidateKubeletConfiguration function + if err := componentconfigvalidation.ValidateKubeletConfiguration(&s.KubeletConfiguration); err != nil { + return err + } + if err := ValidateKubeletFlags(&s.KubeletFlags); err != nil { + return err + } + return nil +} // AddFlags adds flags for a specific KubeletServer to the specified FlagSet func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { - var kc *kubeletConfiguration = (*kubeletConfiguration)(&s.KubeletConfiguration) s.KubeletFlags.AddFlags(fs) - kc.addFlags(fs) + AddKubeletConfigFlags(fs, &s.KubeletConfiguration) } // AddFlags adds flags for a specific KubeletFlags to the specified FlagSet @@ -140,10 +212,21 @@ func (f *KubeletFlags) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&f.NodeIP, "node-ip", f.NodeIP, "IP address of the node. 
If set, kubelet will use this IP address for the node") fs.StringVar(&f.ProviderID, "provider-id", f.ProviderID, "Unique identifier for identifying the node in a machine database, i.e cloudprovider") + + fs.StringVar(&f.CertDirectory, "cert-dir", f.CertDirectory, "The directory where the TLS certs are located. "+ + "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") + + fs.StringVar(&f.CloudProvider, "cloud-provider", f.CloudProvider, "The provider for cloud services. By default, kubelet will attempt to auto-detect the cloud provider. Specify empty string for running with no cloud provider.") + fs.StringVar(&f.CloudConfigFile, "cloud-config", f.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") + + fs.StringVar(&f.RootDirectory, "root-dir", f.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).") + + fs.Var(&f.DynamicConfigDir, "dynamic-config-dir", "The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. Presently, you must also enable the DynamicKubeletConfig feature gate to pass this flag.") + fs.Var(&f.InitConfigDir, "init-config-dir", "The Kubelet will look in this directory for the init configuration. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Omit this argument to use the built-in default configuration values. Presently, you must also enable the DynamicKubeletConfig feature gate to pass this flag.") } -// addFlags adds flags for a specific componentconfig.KubeletConfiguration to the specified FlagSet -func (c *kubeletConfiguration) addFlags(fs *pflag.FlagSet) { +// AddKubeletConfigFlags adds flags for a specific componentconfig.KubeletConfiguration to the specified FlagSet +func AddKubeletConfigFlags(fs *pflag.FlagSet, c *componentconfig.KubeletConfiguration) { fs.BoolVar(&c.FailSwapOn, "fail-swap-on", true, "Makes the Kubelet fail to start if swap is enabled on the node. ") fs.BoolVar(&c.FailSwapOn, "experimental-fail-swap-on", true, "DEPRECATED: please use --fail-swap-on instead.") fs.MarkDeprecated("experimental-fail-swap-on", "This flag is deprecated and will be removed in future releases. please use --fail-swap-on instead.") @@ -186,10 +269,7 @@ func (c *kubeletConfiguration) addFlags(fs *pflag.FlagSet) { "If --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key "+ "are generated for the public address and saved to the directory passed to --cert-dir.") fs.StringVar(&c.TLSPrivateKeyFile, "tls-private-key-file", c.TLSPrivateKeyFile, "File containing x509 private key matching --tls-cert-file.") - fs.StringVar(&c.CertDirectory, "cert-dir", c.CertDirectory, "The directory where the TLS certs are located. 
"+ - "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") - fs.StringVar(&c.RootDirectory, "root-dir", c.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).") fs.StringVar(&c.SeccompProfileRoot, "seccomp-profile-root", c.SeccompProfileRoot, "Directory path for seccomp profiles.") fs.BoolVar(&c.AllowPrivileged, "allow-privileged", c.AllowPrivileged, "If true, allow containers to request privileged mode.") fs.StringSliceVar(&c.HostNetworkSources, "host-network-sources", c.HostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network.") @@ -227,8 +307,6 @@ func (c *kubeletConfiguration) addFlags(fs *pflag.FlagSet) { fs.Int32Var(&c.ImageGCLowThresholdPercent, "image-gc-low-threshold", c.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.") fs.DurationVar(&c.VolumeStatsAggPeriod.Duration, "volume-stats-agg-period", c.VolumeStatsAggPeriod.Duration, "Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0.") fs.StringVar(&c.VolumePluginDir, "volume-plugin-dir", c.VolumePluginDir, " The full path of the directory in which to search for additional third party volume plugins") - fs.StringVar(&c.CloudProvider, "cloud-provider", c.CloudProvider, "The provider for cloud services. By default, kubelet will attempt to auto-detect the cloud provider. Specify empty string for running with no cloud provider.") - fs.StringVar(&c.CloudConfigFile, "cloud-config", c.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") fs.StringVar(&c.FeatureGates, "feature-gates", c.FeatureGates, "A set of key=value pairs that describe feature gates for alpha/experimental features. 
"+ "Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 77cf0c37e7..84f221346c 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -28,6 +28,7 @@ import ( "net/url" "os" "path" + "path/filepath" "strconv" "time" @@ -37,8 +38,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -73,6 +72,7 @@ import ( dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote" "k8s.io/kubernetes/pkg/kubelet/eviction" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/server" "k8s.io/kubernetes/pkg/kubelet/server/streaming" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -93,7 +93,8 @@ const ( // NewKubeletCommand creates a *cobra.Command object with default parameters func NewKubeletCommand() *cobra.Command { - s := options.NewKubeletServer() + // ignore the error, as this is just for generating docs and the like + s, _ := options.NewKubeletServer() s.AddFlags(pflag.CommandLine) cmd := &cobra.Command{ Use: componentKubelet, @@ -167,119 +168,7 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err }, nil } -func getKubeClient(s *options.KubeletServer) (*clientset.Clientset, error) { - clientConfig, err := CreateAPIServerClientConfig(s) - if err == nil { - kubeClient, err := clientset.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - return kubeClient, nil - } - return nil, err -} - -// Tries to download the kubelet- configmap from "kube-system" namespace via the API server and returns a JSON string or error -func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (string, error) { - // TODO(mtaufen): should probably cache clientset and pass into this function rather than regenerate on every request - kubeClient, err := getKubeClient(s) - if err != nil { - return "", err - } - - configmap, err := func() (*v1.ConfigMap, error) { - var nodename types.NodeName - hostname := nodeutil.GetHostname(s.HostnameOverride) - - if kubeDeps != nil && kubeDeps.Cloud != nil { - instances, ok := kubeDeps.Cloud.Instances() - if !ok { - err = fmt.Errorf("failed to get instances from cloud provider, can't determine nodename") - return nil, err - } - nodename, err = instances.CurrentNodeName(hostname) - if err != nil { - err = fmt.Errorf("error fetching current instance name from cloud provider: %v", err) - return nil, err - } - // look for kubelet- configmap from "kube-system" - configmap, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename), metav1.GetOptions{}) - if err != nil { - return nil, err - } - return configmap, nil - } - // No cloud provider yet, so can't get the nodename via Cloud.Instances().CurrentNodeName(hostname), try just using the hostname - configmap, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname), metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("cloud provider was nil, and attempt to use hostname to find config resulted in: %v", err) - } - return configmap, nil - }() - if err != nil { - return "", err - } - - // When we create the KubeletConfiguration configmap, we put a json string - // 
representation of the config in a `kubelet.config` key. - jsonstr, ok := configmap.Data["kubelet.config"] - if !ok { - return "", fmt.Errorf("KubeletConfiguration configmap did not contain a value with key `kubelet.config`") - } - - return jsonstr, nil -} - -func startKubeletConfigSyncLoop(s *options.KubeletServer, currentKC string) { - glog.Infof("Starting Kubelet configuration sync loop") - go func() { - wait.PollInfinite(30*time.Second, func() (bool, error) { - glog.Infof("Checking API server for new Kubelet configuration.") - remoteKC, err := getRemoteKubeletConfig(s, nil) - if err == nil { - // Detect new config by comparing with the last JSON string we extracted. - if remoteKC != currentKC { - glog.Info("Found new Kubelet configuration via API server, restarting!") - os.Exit(0) - } - } else { - glog.Infof("Did not find a configuration for this Kubelet via API server: %v", err) - } - return false, nil // Always return (false, nil) so we poll forever. - }) - }() -} - -// Try to check for config on the API server, return that config if we get it, and start -// a background thread that checks for updates to configs. -func initKubeletConfigSync(s *options.KubeletServer) (*componentconfig.KubeletConfiguration, error) { - jsonstr, err := getRemoteKubeletConfig(s, nil) - if err == nil { - // We will compare future API server config against the config we just got (jsonstr): - startKubeletConfigSyncLoop(s, jsonstr) - - // Convert json from API server to external type struct, and convert that to internal type struct - extKC := componentconfigv1alpha1.KubeletConfiguration{} - err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(jsonstr), &extKC) - if err != nil { - return nil, err - } - api.Scheme.Default(&extKC) - kc := componentconfig.KubeletConfiguration{} - err = api.Scheme.Convert(&extKC, &kc, nil) - if err != nil { - return nil, err - } - return &kc, nil - } else { - // Couldn't get a configuration from the API server yet. - // Restart as soon as anything comes back from the API server. - startKubeletConfigSyncLoop(s, "") - return nil, err - } -} - -// Run runs the specified KubeletServer with the given Dependencies. This should never exit. +// Run runs the specified KubeletServer with the given Dependencies. This should never exit. // The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer. // Otherwise, the caller is assumed to have set up the Dependencies object and a default one will // not be generated. @@ -315,27 +204,6 @@ func initConfigz(kc *componentconfig.KubeletConfiguration) (*configz.Config, err return cz, err } -// validateConfig validates configuration of Kubelet and returns an error if the input configuration is invalid. -func validateConfig(s *options.KubeletServer) error { - if !s.CgroupsPerQOS && len(s.EnforceNodeAllocatable) > 0 { - return fmt.Errorf("Node Allocatable enforcement is not supported unless Cgroups Per QOS feature is turned on") - } - if s.SystemCgroups != "" && s.CgroupRoot == "" { - return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified") - } - for _, val := range s.EnforceNodeAllocatable { - switch val { - case cm.NodeAllocatableEnforcementKey: - case cm.SystemReservedEnforcementKey: - case cm.KubeReservedEnforcementKey: - continue - default: - return fmt.Errorf("invalid option %q specified for EnforceNodeAllocatable setting. 
Valid options are %q, %q or %q", val, cm.NodeAllocatableEnforcementKey, cm.SystemReservedEnforcementKey, cm.KubeReservedEnforcementKey) - } - } - return nil -} - // makeEventRecorder sets up kubeDeps.Recorder if its nil. Its a no-op otherwise. func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, nodeName types.NodeName) { if kubeDeps.Recorder != nil { @@ -353,20 +221,20 @@ func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubele } func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { - - standaloneMode := true - switch { - case s.RequireKubeConfig == true: - standaloneMode = false - glog.Warningf("--require-kubeconfig is deprecated. Set --kubeconfig without using --require-kubeconfig.") - case s.KubeConfig.Provided(): - standaloneMode = false + // Set global feature gates based on the value on the initial KubeletServer + err = utilfeature.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates) + if err != nil { + return err + } + // validate the initial KubeletServer (we set feature gates first, because this validation depends on feature gates) + if err := options.ValidateKubeletServer(s); err != nil { + return err } + // Obtain Kubelet Lock File if s.ExitOnLockContention && s.LockFilePath == "" { return errors.New("cannot exit on lock file contention: no lock file specified") } - done := make(chan struct{}) if s.LockFilePath != "" { glog.Infof("acquiring file lock on %q", s.LockFilePath) @@ -381,44 +249,20 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { } } - // Set feature gates based on the value in KubeletConfiguration - err = utilfeature.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates) - if err != nil { - return err - } - // Register current configuration with /configz endpoint - cfgz, cfgzErr := initConfigz(&s.KubeletConfiguration) - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { - // Look for config on the API server. If it exists, replace s.KubeletConfiguration - // with it and continue. initKubeletConfigSync also starts the background thread that checks for new config. - - // Don't do dynamic Kubelet configuration in runonce mode - if s.RunOnce == false { - remoteKC, err := initKubeletConfigSync(s) - if err == nil { - // Update s (KubeletServer) with new config from API server - s.KubeletConfiguration = *remoteKC - // Ensure that /configz is up to date with the new config - if cfgzErr != nil { - glog.Errorf("was unable to register configz before due to %s, will not be able to set now", cfgzErr) - } else { - setConfigz(cfgz, &s.KubeletConfiguration) - } - // Update feature gates from the new config - err = utilfeature.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates) - if err != nil { - return err - } - } else { - glog.Errorf("failed to init dynamic Kubelet configuration sync: %v", err) - } - } + _, err = initConfigz(&s.KubeletConfiguration) + if err != nil { + glog.Errorf("unable to register KubeletConfiguration with configz, error: %v", err) } - // Validate configuration. - if err := validateConfig(s); err != nil { - return err + // About to get clients and such, detect standaloneMode + standaloneMode := true + switch { + case s.RequireKubeConfig == true: + standaloneMode = false + glog.Warningf("--require-kubeconfig is deprecated. 
Set --kubeconfig without using --require-kubeconfig.") + case s.KubeConfig.Provided(): + standaloneMode = false } if kubeDeps == nil { @@ -518,6 +362,12 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { } } + // Alpha Dynamic Configuration Implementation; + // if the kubelet config controller is available, inject the latest to start the config and status sync loops + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce { + kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, string(nodeName)) + } + if kubeDeps.Auth == nil { auth, err := BuildAuth(nodeName, kubeDeps.ExternalKubeClient, s.KubeletConfiguration) if err != nil { @@ -653,8 +503,8 @@ func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName // certificate and key file are generated. Returns a configured server.TLSOptions object. func InitializeTLS(kf *options.KubeletFlags, kc *componentconfig.KubeletConfiguration) (*server.TLSOptions, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) && kc.TLSCertFile == "" && kc.TLSPrivateKeyFile == "" { - kc.TLSCertFile = path.Join(kc.CertDirectory, "kubelet.crt") - kc.TLSPrivateKeyFile = path.Join(kc.CertDirectory, "kubelet.key") + kc.TLSCertFile = path.Join(kf.CertDirectory, "kubelet.crt") + kc.TLSPrivateKeyFile = path.Join(kf.CertDirectory, "kubelet.key") canReadCertAndKey, err := certutil.CanReadCertAndKey(kc.TLSCertFile, kc.TLSPrivateKeyFile) if err != nil { @@ -795,8 +645,8 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.Kubele } capabilities.Setup(kubeCfg.AllowPrivileged, privilegedSources, 0) - credentialprovider.SetPreferredDockercfgPath(kubeCfg.RootDirectory) - glog.V(2).Infof("Using root directory: %v", kubeCfg.RootDirectory) + credentialprovider.SetPreferredDockercfgPath(kubeFlags.RootDirectory) + glog.V(2).Infof("Using root directory: %v", kubeFlags.RootDirectory) builder := kubeDeps.Builder if builder == nil { @@ -805,7 +655,8 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.Kubele if kubeDeps.OSInterface == nil { kubeDeps.OSInterface = kubecontainer.RealOS{} } - k, err := builder(kubeCfg, kubeDeps, &kubeFlags.ContainerRuntimeOptions, kubeFlags.HostnameOverride, kubeFlags.NodeIP, kubeFlags.ProviderID) + + k, err := builder(kubeCfg, kubeDeps, &kubeFlags.ContainerRuntimeOptions, kubeFlags.HostnameOverride, kubeFlags.NodeIP, kubeFlags.ProviderID, kubeFlags.CloudProvider, kubeFlags.CertDirectory, kubeFlags.RootDirectory) if err != nil { return fmt.Errorf("failed to create kubelet: %v", err) } @@ -849,11 +700,20 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *compon } } -func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, crOptions *options.ContainerRuntimeOptions, hostnameOverride, nodeIP, providerID string) (k kubelet.Bootstrap, err error) { +func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, + kubeDeps *kubelet.Dependencies, + crOptions *options.ContainerRuntimeOptions, + hostnameOverride, + nodeIP, + providerID, + cloudProvider, + certDirectory, + rootDirectory string) (k kubelet.Bootstrap, err error) { + // TODO: block until all sources have delivered at least one update to the channel, or break the sync loop // up into "per source" synchronizations - k, err = kubelet.NewMainKubelet(kubeCfg, kubeDeps, crOptions, 
hostnameOverride, nodeIP, providerID) + k, err = kubelet.NewMainKubelet(kubeCfg, kubeDeps, crOptions, hostnameOverride, nodeIP, providerID, cloudProvider, certDirectory, rootDirectory) if err != nil { return nil, err } @@ -896,6 +756,36 @@ func parseResourceList(m componentconfig.ConfigurationMap) (v1.ResourceList, err return rl, nil } +// BootstrapKubeletConfigController constructs and bootstrap a configuration controller +func BootstrapKubeletConfigController(flags *options.KubeletFlags, + defaultConfig *componentconfig.KubeletConfiguration) (*componentconfig.KubeletConfiguration, *kubeletconfig.Controller, error) { + var err error + // Alpha Dynamic Configuration Implementation; this section only loads config from disk, it does not contact the API server + // compute absolute paths based on current working dir + initConfigDir := "" + if flags.InitConfigDir.Provided() { + initConfigDir, err = filepath.Abs(flags.InitConfigDir.Value()) + if err != nil { + return nil, nil, fmt.Errorf("failed to get absolute path for --init-config-dir") + } + } + dynamicConfigDir := "" + if flags.DynamicConfigDir.Provided() { + dynamicConfigDir, err = filepath.Abs(flags.DynamicConfigDir.Value()) + if err != nil { + return nil, nil, fmt.Errorf("failed to get absolute path for --dynamic-config-dir") + } + } + + // get the latest KubeletConfiguration checkpoint from disk, or load the init or default config if no valid checkpoints exist + kubeletConfigController := kubeletconfig.NewController(initConfigDir, dynamicConfigDir, defaultConfig) + kubeletConfig, err := kubeletConfigController.Bootstrap() + if err != nil { + return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %v", err) + } + return kubeletConfig, kubeletConfigController, nil +} + // RunDockershim only starts the dockershim in current process. This is only used for cri validate testing purpose // TODO(random-liu): Move this to a separate binary. 
 func RunDockershim(c *componentconfig.KubeletConfiguration, r *options.ContainerRuntimeOptions) error {
diff --git a/cmd/kubelet/kubelet.go b/cmd/kubelet/kubelet.go
index b9f1b5fc17..a55f9c68e2 100644
--- a/cmd/kubelet/kubelet.go
+++ b/cmd/kubelet/kubelet.go
@@ -24,36 +24,88 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/spf13/pflag"
+
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/apiserver/pkg/util/flag"
 	"k8s.io/apiserver/pkg/util/logs"
 	"k8s.io/kubernetes/cmd/kubelet/app"
 	"k8s.io/kubernetes/cmd/kubelet/app/options"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
-	_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
+	_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
 	"k8s.io/kubernetes/pkg/version/verflag"
-
-	"github.com/spf13/pflag"
 )
 
-func main() {
-	s := options.NewKubeletServer()
-	s.AddFlags(pflag.CommandLine)
+func die(err error) {
+	fmt.Fprintf(os.Stderr, "error: %v\n", err)
+	os.Exit(1)
+}
+
+func main() {
+	// construct KubeletFlags object and register command line flags mapping
+	kubeletFlags := options.NewKubeletFlags()
+	kubeletFlags.AddFlags(pflag.CommandLine)
+
+	// construct KubeletConfiguration object and register command line flags mapping
+	defaultConfig, err := options.NewKubeletConfiguration()
+	if err != nil {
+		die(err)
+	}
+	options.AddKubeletConfigFlags(pflag.CommandLine, defaultConfig)
+
+	// parse the command line flags into the respective objects
 	flag.InitFlags()
+
+	// initialize logging and defer flush
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
+	// short-circuit on verflag
 	verflag.PrintAndExitIfRequested()
 
-	if s.ExperimentalDockershim {
-		if err := app.RunDockershim(&s.KubeletConfiguration, &s.ContainerRuntimeOptions); err != nil {
-			fmt.Fprintf(os.Stderr, "error: %v\n", err)
-			os.Exit(1)
+	// TODO(mtaufen): won't need this once dynamic config is GA
+	// set feature gates so we can check if dynamic config is enabled
+	if err := utilfeature.DefaultFeatureGate.Set(defaultConfig.FeatureGates); err != nil {
+		die(err)
+	}
+	// validate the initial KubeletFlags, to make sure the dynamic-config-related flags aren't used unless the feature gate is on
+	if err := options.ValidateKubeletFlags(kubeletFlags); err != nil {
+		die(err)
+	}
+	// if dynamic kubelet config is enabled, bootstrap the kubelet config controller
+	var kubeletConfig *componentconfig.KubeletConfiguration
+	var kubeletConfigController *kubeletconfig.Controller
+	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
+		var err error
+		kubeletConfig, kubeletConfigController, err = app.BootstrapKubeletConfigController(kubeletFlags, defaultConfig)
+		if err != nil {
+			die(err)
+		}
+	} else if kubeletConfig == nil {
+		kubeletConfig = defaultConfig
+	}
+
+	// construct a KubeletServer from kubeletFlags and kubeletConfig
+	kubeletServer := &options.KubeletServer{KubeletFlags: *kubeletFlags, KubeletConfiguration: *kubeletConfig}
+
+	// use kubeletServer to construct the default KubeletDeps
+	kubeletDeps, err := app.UnsecuredDependencies(kubeletServer)
+	if err != nil {
+		die(err)
+	}
+
+	// add the kubelet config controller to kubeletDeps
+	kubeletDeps.KubeletConfigController = kubeletConfigController
+
+	// start the experimental docker shim, if enabled
+	if kubeletFlags.ExperimentalDockershim {
+		if err := app.RunDockershim(kubeletConfig, &kubeletFlags.ContainerRuntimeOptions); err != nil {
+			die(err)
 		}
 	}
 
-	if err := app.Run(s, nil); err != nil {
-		fmt.Fprintf(os.Stderr, "error: %v\n", err)
-		os.Exit(1)
+	// run the kubelet
+	if err := app.Run(kubeletServer, kubeletDeps); err != nil {
+		die(err)
 	}
 }
diff --git a/hack/.golint_failures b/hack/.golint_failures
index 136282523b..3e3974d262 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -793,7 +793,6 @@ staging/src/k8s.io/sample-apiserver/pkg/apiserver
 staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/externalversions/internalinterfaces
 staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/internalversion/internalinterfaces
 staging/src/k8s.io/sample-apiserver/pkg/cmd/server
-staging/src/k8s.io/sample-apiserver/pkg/registry
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
 test/e2e
diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt
index a3d243b78b..66e0ea3551 100644
--- a/hack/verify-flags/known-flags.txt
+++ b/hack/verify-flags/known-flags.txt
@@ -193,6 +193,7 @@ drop-embedded-fields
 dry-run
 dump-logs-on-failure
 duration-sec
+dynamic-config-dir
 e2e-output-dir
 e2e-verify-service-account
 enable-aggregator-routing
@@ -347,6 +348,7 @@ image-service-endpoint
 included-types-overrides
 include-extended-apis
 include-extended-apis
+init-config-dir
 initial-sync-timeout
 input-base
 input-dirs
@@ -490,6 +492,7 @@ network-plugin
 network-plugin-dir
 network-plugin-mtu
 node-cidr-mask-size
+node-config-dir
 node-eviction-rate
 node-instance-group
 node-ip
diff --git a/pkg/api/register.go b/pkg/api/register.go
index 5f261bdfb5..39562e5a56 100644
--- a/pkg/api/register.go
+++ b/pkg/api/register.go
@@ -85,6 +85,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&ServiceProxyOptions{},
 		&NodeList{},
 		&Node{},
+		&NodeConfigSource{},
 		&NodeProxyOptions{},
 		&Endpoints{},
 		&EndpointsList{},
diff --git a/pkg/api/types.go b/pkg/api/types.go
index ab9b7056d4..33371228fd 100644
--- a/pkg/api/types.go
+++ b/pkg/api/types.go
@@ -2913,6 +2913,19 @@ type NodeSpec struct {
 	// If specified, the node's taints.
 	// +optional
 	Taints []Taint
+
+	// If specified, the source to get node configuration from
+	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+	// +optional
+	ConfigSource *NodeConfigSource
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil.
+type NodeConfigSource struct {
+	metav1.TypeMeta
+	ConfigMapRef *ObjectReference
 }
 
 // DaemonEndpoint contains information about a single Daemon endpoint.
diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go
index a6b4977442..bcb9069614 100644
--- a/pkg/api/validation/validation.go
+++ b/pkg/api/validation/validation.go
@@ -3338,6 +3338,11 @@ func ValidateNode(node *api.Node) field.ErrorList {
 		allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), ""))
 	}
+	// Only allow Node.Spec.ConfigSource to be set if the DynamicKubeletConfig feature gate is enabled
+	if node.Spec.ConfigSource != nil && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
+		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled"))
+	}
+
 	// TODO(rjnagal): Ignore PodCIDR till its completely implemented.
return allErrs } @@ -3365,7 +3370,7 @@ func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) } - // Validte no duplicate addresses in node status. + // Validate no duplicate addresses in node status. addresses := make(map[api.NodeAddress]bool) for i, address := range node.Status.Addresses { if _, ok := addresses[address]; ok { @@ -3398,10 +3403,16 @@ func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { } oldNode.Spec.Taints = node.Spec.Taints + // Allow updates to Node.Spec.ConfigSource if DynamicKubeletConfig feature gate is enabled + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + oldNode.Spec.ConfigSource = node.Spec.ConfigSource + } + + // We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields. // TODO: Add a 'real' error type for this error and provide print actual diffs. if !apiequality.Semantic.DeepEqual(oldNode, node) { glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints or capacity")) + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints, or capacity (or configSource, if the DynamicKubeletConfig feature gate is enabled)")) } return allErrs diff --git a/pkg/apis/componentconfig/BUILD b/pkg/apis/componentconfig/BUILD index d651fa8949..f6d6ab2b2e 100644 --- a/pkg/apis/componentconfig/BUILD +++ b/pkg/apis/componentconfig/BUILD @@ -50,6 +50,7 @@ filegroup( ":package-srcs", "//pkg/apis/componentconfig/install:all-srcs", "//pkg/apis/componentconfig/v1alpha1:all-srcs", + "//pkg/apis/componentconfig/validation:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/apis/componentconfig/types.go b/pkg/apis/componentconfig/types.go index f3ff349776..2c0a7b2fe4 100644 --- a/pkg/apis/componentconfig/types.go +++ b/pkg/apis/componentconfig/types.go @@ -180,6 +180,18 @@ const ( type KubeletConfiguration struct { metav1.TypeMeta + // Only used for dynamic configuration. + // The length of the trial period for this configuration. If the Kubelet records CrashLoopThreshold or + // more startups during this period, the current configuration will be marked bad and the + // Kubelet will roll-back to the last-known-good. Default 10 minutes. + ConfigTrialDuration metav1.Duration + // Only used for dynamic configuration. + // If this number of Kubelet "crashes" during ConfigTrialDuration meets this threshold, + // the configuration fails the trial and the Kubelet rolls back to its last-known-good config. + // Crash-loops are detected by counting Kubelet startups, so one startup is implicitly added + // to this threshold to always allow a single restart per config change. + // Default 10, mimimum allowed is 0, maximum allowed is 10. + CrashLoopThreshold int32 // podManifestPath is the path to the directory containing pod manifests to // run, or the path to a single manifest file PodManifestPath string @@ -215,17 +227,10 @@ type KubeletConfiguration struct { // tlsPrivateKeyFile is the ile containing x509 private key matching // tlsCertFile. TLSPrivateKeyFile string - // certDirectory is the directory where the TLS certs are located (by - // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile - // are provided, this flag will be ignored. 
- CertDirectory string // authentication specifies how requests to the Kubelet's server are authenticated Authentication KubeletAuthentication // authorization specifies how requests to the Kubelet's server are authorized Authorization KubeletAuthorization - // rootDirectory is the directory path to place kubelet files (volume - // mounts,etc). - RootDirectory string // seccompProfileRoot is the directory path for seccomp profiles. SeccompProfileRoot string // allowPrivileged enables containers to request privileged mode. @@ -314,12 +319,6 @@ type KubeletConfiguration struct { // volumePluginDir is the full path of the directory in which to search // for additional third party volume plugins VolumePluginDir string - // cloudProvider is the provider for cloud services. - // +optional - CloudProvider string - // cloudConfigFile is the path to the cloud provider configuration file. - // +optional - CloudConfigFile string // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in. // +optional KubeletCgroups string diff --git a/pkg/apis/componentconfig/v1alpha1/defaults.go b/pkg/apis/componentconfig/v1alpha1/defaults.go index 8b34965684..bb501e17a2 100644 --- a/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -34,7 +34,7 @@ import ( ) const ( - defaultRootDir = "/var/lib/kubelet" + DefaultRootDir = "/var/lib/kubelet" AutoDetectCloudProvider = "auto-detect" @@ -192,6 +192,13 @@ func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { } func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { + // pointer because the zeroDuration is valid - if you want to skip the trial period + if obj.ConfigTrialDuration == nil { + obj.ConfigTrialDuration = &metav1.Duration{Duration: 10 * time.Minute} + } + if obj.CrashLoopThreshold == nil { + obj.CrashLoopThreshold = utilpointer.Int32Ptr(10) + } if obj.Authentication.Anonymous.Enabled == nil { obj.Authentication.Anonymous.Enabled = boolVar(true) } @@ -214,18 +221,12 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.Address == "" { obj.Address = "0.0.0.0" } - if obj.CloudProvider == "" { - obj.CloudProvider = AutoDetectCloudProvider - } if obj.CAdvisorPort == nil { obj.CAdvisorPort = utilpointer.Int32Ptr(4194) } if obj.VolumeStatsAggPeriod == zeroDuration { obj.VolumeStatsAggPeriod = metav1.Duration{Duration: time.Minute} } - if obj.CertDirectory == "" { - obj.CertDirectory = "/var/run/kubernetes" - } if obj.ContainerRuntime == "" { obj.ContainerRuntime = "docker" } @@ -338,14 +339,11 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.ResolverConfig == "" { obj.ResolverConfig = kubetypes.ResolvConfDefault } - if obj.RootDirectory == "" { - obj.RootDirectory = defaultRootDir - } if obj.SerializeImagePulls == nil { obj.SerializeImagePulls = boolVar(true) } if obj.SeccompProfileRoot == "" { - obj.SeccompProfileRoot = filepath.Join(defaultRootDir, "seccomp") + obj.SeccompProfileRoot = filepath.Join(DefaultRootDir, "seccomp") } if obj.StreamingConnectionIdleTimeout == zeroDuration { obj.StreamingConnectionIdleTimeout = metav1.Duration{Duration: 4 * time.Hour} diff --git a/pkg/apis/componentconfig/v1alpha1/types.go b/pkg/apis/componentconfig/v1alpha1/types.go index 77fd7d4251..12941dc610 100644 --- a/pkg/apis/componentconfig/v1alpha1/types.go +++ b/pkg/apis/componentconfig/v1alpha1/types.go @@ -256,6 +256,18 @@ type LeaderElectionConfiguration struct { type KubeletConfiguration struct { metav1.TypeMeta 
`json:",inline"`
+	// Only used for dynamic configuration.
+	// The length of the trial period for this configuration. If the Kubelet records CrashLoopThreshold or
+	// more startups during this period, the current configuration will be marked bad and the
+	// Kubelet will roll-back to the last-known-good. Default 10 minutes.
+	ConfigTrialDuration *metav1.Duration `json:"configTrialDuration"`
+	// Only used for dynamic configuration.
+	// If this number of Kubelet "crashes" during ConfigTrialDuration meets this threshold,
+	// the configuration fails the trial and the Kubelet rolls back to its last-known-good config.
+	// Crash-loops are detected by counting Kubelet startups, so one startup is implicitly added
+	// to this threshold to always allow a single restart per config change.
+	// Default 10, minimum allowed is 0, maximum allowed is 10.
+	CrashLoopThreshold *int32 `json:"crashLoopThreshold"`
 	// podManifestPath is the path to the directory containing pod manifests to
 	// run, or the path to a single manifest file
 	PodManifestPath string `json:"podManifestPath"`
@@ -291,17 +303,10 @@ type KubeletConfiguration struct {
 	// tlsPrivateKeyFile is the ile containing x509 private key matching
 	// tlsCertFile.
 	TLSPrivateKeyFile string `json:"tlsPrivateKeyFile"`
-	// certDirectory is the directory where the TLS certs are located (by
-	// default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile
-	// are provided, this flag will be ignored.
-	CertDirectory string `json:"certDirectory"`
 	// authentication specifies how requests to the Kubelet's server are authenticated
 	Authentication KubeletAuthentication `json:"authentication"`
 	// authorization specifies how requests to the Kubelet's server are authorized
 	Authorization KubeletAuthorization `json:"authorization"`
-	// rootDirectory is the directory path to place kubelet files (volume
-	// mounts,etc).
-	RootDirectory string `json:"rootDirectory"`
 	// seccompProfileRoot is the directory path for seccomp profiles.
 	SeccompProfileRoot string `json:"seccompProfileRoot"`
 	// allowPrivileged enables containers to request privileged mode.
@@ -391,10 +396,6 @@ type KubeletConfiguration struct {
 	// volumePluginDir is the full path of the directory in which to search
 	// for additional third party volume plugins
 	VolumePluginDir string `json:"volumePluginDir"`
-	// cloudProvider is the provider for cloud services.
-	CloudProvider string `json:"cloudProvider"`
-	// cloudConfigFile is the path to the cloud provider configuration file.
-	CloudConfigFile string `json:"cloudConfigFile"`
 	// kubeletCgroups is the absolute name of cgroups to isolate the kubelet in.
 	KubeletCgroups string `json:"kubeletCgroups"`
 	// runtimeCgroups are cgroups that container runtime is expected to be isolated in.
diff --git a/pkg/apis/componentconfig/validation/BUILD b/pkg/apis/componentconfig/validation/BUILD
new file mode 100644
index 0000000000..7759085934
--- /dev/null
+++ b/pkg/apis/componentconfig/validation/BUILD
@@ -0,0 +1,31 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["validation.go"],
+    tags = ["automanaged"],
+    deps = [
+        "//pkg/apis/componentconfig:go_default_library",
+        "//pkg/kubelet/cm:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)
diff --git a/pkg/apis/componentconfig/validation/validation.go b/pkg/apis/componentconfig/validation/validation.go
new file mode 100644
index 0000000000..3931672c72
--- /dev/null
+++ b/pkg/apis/componentconfig/validation/validation.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	containermanager "k8s.io/kubernetes/pkg/kubelet/cm"
+)
+
+// MaxCrashLoopThreshold is the maximum allowed KubeletConfiguration.CrashLoopThreshold
+const MaxCrashLoopThreshold = 10
+
+// ValidateKubeletConfiguration validates `kc` and returns an error if it is invalid
+func ValidateKubeletConfiguration(kc *componentconfig.KubeletConfiguration) error {
+	// restrict crashloop threshold to between 0 and `MaxCrashLoopThreshold`, inclusive
+	// more than `maxStartups=MaxCrashLoopThreshold` adds unnecessary bloat to the .startups.json file,
+	// and negative values would be silly.
+	if kc.CrashLoopThreshold < 0 || kc.CrashLoopThreshold > MaxCrashLoopThreshold {
+		return fmt.Errorf("field `CrashLoopThreshold` must be between 0 and %d, inclusive", MaxCrashLoopThreshold)
+	}
+
+	if !kc.CgroupsPerQOS && len(kc.EnforceNodeAllocatable) > 0 {
+		return fmt.Errorf("node allocatable enforcement is not supported unless Cgroups Per QOS feature is turned on")
+	}
+	if kc.SystemCgroups != "" && kc.CgroupRoot == "" {
+		return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified")
+	}
+	for _, val := range kc.EnforceNodeAllocatable {
+		switch val {
+		case containermanager.NodeAllocatableEnforcementKey:
+		case containermanager.SystemReservedEnforcementKey:
+		case containermanager.KubeReservedEnforcementKey:
+			continue
+		default:
+			return fmt.Errorf("invalid option %q specified for EnforceNodeAllocatable setting. 
Valid options are %q, %q or %q", + val, containermanager.NodeAllocatableEnforcementKey, containermanager.SystemReservedEnforcementKey, containermanager.KubeReservedEnforcementKey) + } + } + return nil +} diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 607eaafe7c..495036c105 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -64,6 +64,7 @@ go_library( "//pkg/kubelet/gpu:go_default_library", "//pkg/kubelet/gpu/nvidia:go_default_library", "//pkg/kubelet/images:go_default_library", + "//pkg/kubelet/kubeletconfig:go_default_library", "//pkg/kubelet/kuberuntime:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/metrics:go_default_library", @@ -256,6 +257,7 @@ filegroup( "//pkg/kubelet/eviction:all-srcs", "//pkg/kubelet/gpu:all-srcs", "//pkg/kubelet/images:all-srcs", + "//pkg/kubelet/kubeletconfig:all-srcs", "//pkg/kubelet/kuberuntime:all-srcs", "//pkg/kubelet/leaky:all-srcs", "//pkg/kubelet/lifecycle:all-srcs", diff --git a/pkg/kubelet/certificate/kubelet.go b/pkg/kubelet/certificate/kubelet.go index aed82739c1..5c85c66f22 100644 --- a/pkg/kubelet/certificate/kubelet.go +++ b/pkg/kubelet/certificate/kubelet.go @@ -31,15 +31,15 @@ import ( // NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate // or returns an error. -func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *componentconfig.KubeletConfiguration, nodeName types.NodeName, ips []net.IP, hostnames []string) (Manager, error) { +func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *componentconfig.KubeletConfiguration, nodeName types.NodeName, ips []net.IP, hostnames []string, certDirectory string) (Manager, error) { var certSigningRequestClient clientcertificates.CertificateSigningRequestInterface if kubeClient != nil && kubeClient.Certificates() != nil { certSigningRequestClient = kubeClient.Certificates().CertificateSigningRequests() } certificateStore, err := NewFileStore( "kubelet-server", - kubeCfg.CertDirectory, - kubeCfg.CertDirectory, + certDirectory, + certDirectory, kubeCfg.TLSCertFile, kubeCfg.TLSPrivateKeyFile) if err != nil { diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index b562e5e9be..03e43ed1d7 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -74,6 +74,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/gpu" "k8s.io/kubernetes/pkg/kubelet/gpu/nvidia" "k8s.io/kubernetes/pkg/kubelet/images" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/kuberuntime" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/metrics" @@ -189,7 +190,15 @@ type Bootstrap interface { } // Builder creates and initializes a Kubelet instance -type Builder func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, hostnameOverride, nodeIP, providerID string) (Bootstrap, error) +type Builder func(kubeCfg *componentconfig.KubeletConfiguration, + kubeDeps *Dependencies, + crOptions *options.ContainerRuntimeOptions, + hostnameOverride, + nodeIP, + providerID, + cloudProvider, + certDirectory, + rootDirectory string) (Bootstrap, error) // Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed // at runtime that are necessary for running the Kubelet. 
This is a temporary solution for grouping @@ -218,23 +227,24 @@ type Dependencies struct { Options []Option // Injected Dependencies - Auth server.AuthInterface - CAdvisorInterface cadvisor.Interface - Cloud cloudprovider.Interface - ContainerManager cm.ContainerManager - DockerClient libdocker.Interface - EventClient v1core.EventsGetter - KubeClient clientset.Interface - ExternalKubeClient clientgoclientset.Interface - Mounter mount.Interface - NetworkPlugins []network.NetworkPlugin - OOMAdjuster *oom.OOMAdjuster - OSInterface kubecontainer.OSInterface - PodConfig *config.PodConfig - Recorder record.EventRecorder - Writer kubeio.Writer - VolumePlugins []volume.VolumePlugin - TLSOptions *server.TLSOptions + Auth server.AuthInterface + CAdvisorInterface cadvisor.Interface + Cloud cloudprovider.Interface + ContainerManager cm.ContainerManager + DockerClient libdocker.Interface + EventClient v1core.EventsGetter + KubeClient clientset.Interface + ExternalKubeClient clientgoclientset.Interface + Mounter mount.Interface + NetworkPlugins []network.NetworkPlugin + OOMAdjuster *oom.OOMAdjuster + OSInterface kubecontainer.OSInterface + PodConfig *config.PodConfig + Recorder record.EventRecorder + Writer kubeio.Writer + VolumePlugins []volume.VolumePlugin + TLSOptions *server.TLSOptions + KubeletConfigController *kubeletconfig.Controller } // makePodSourceConfig creates a config.PodConfig from the given @@ -284,9 +294,17 @@ func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (i // NewMainKubelet instantiates a new Kubelet object along with all the required internal modules. // No initialization of Kubelet and its modules should happen here. -func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, hostnameOverride, nodeIP, providerID string) (*Kubelet, error) { - if kubeCfg.RootDirectory == "" { - return nil, fmt.Errorf("invalid root directory %q", kubeCfg.RootDirectory) +func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, + kubeDeps *Dependencies, + crOptions *options.ContainerRuntimeOptions, + hostnameOverride, + nodeIP, + providerID, + cloudProvider, + certDirectory, + rootDirectory string) (*Kubelet, error) { + if rootDirectory == "" { + return nil, fmt.Errorf("invalid root directory %q", rootDirectory) } if kubeCfg.SyncFrequency.Duration <= 0 { return nil, fmt.Errorf("invalid sync frequency %d", kubeCfg.SyncFrequency.Duration) @@ -429,7 +447,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep hostname: hostname, nodeName: nodeName, kubeClient: kubeDeps.KubeClient, - rootDirectory: kubeCfg.RootDirectory, + rootDirectory: rootDirectory, resyncInterval: kubeCfg.SyncFrequency.Duration, sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources), registerNode: kubeCfg.RegisterNode, @@ -443,8 +461,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep recorder: kubeDeps.Recorder, cadvisor: kubeDeps.CAdvisorInterface, cloud: kubeDeps.Cloud, - autoDetectCloudProvider: (componentconfigv1alpha1.AutoDetectCloudProvider == kubeCfg.CloudProvider), - externalCloudProvider: cloudprovider.IsExternal(kubeCfg.CloudProvider), + autoDetectCloudProvider: (componentconfigv1alpha1.AutoDetectCloudProvider == cloudProvider), + externalCloudProvider: cloudprovider.IsExternal(cloudProvider), providerID: providerID, nodeRef: nodeRef, nodeLabels: kubeCfg.NodeLabels, @@ -696,7 +714,7 @@ func NewMainKubelet(kubeCfg 
*componentconfig.KubeletConfiguration, kubeDeps *Dep ips = append(ips, cloudIPs...) names := append([]string{klet.GetHostname(), hostnameOverride}, cloudNames...) - klet.serverCertificateManager, err = certificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names) + klet.serverCertificateManager, err = certificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names, certDirectory) if err != nil { return nil, fmt.Errorf("failed to initialize certificate manager: %v", err) } diff --git a/pkg/kubelet/kubeletconfig/BUILD b/pkg/kubelet/kubeletconfig/BUILD new file mode 100644 index 0000000000..0742af769e --- /dev/null +++ b/pkg/kubelet/kubeletconfig/BUILD @@ -0,0 +1,69 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "configsync.go", + "controller.go", + "rollback.go", + "watch.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/componentconfig/validation:go_default_library", + "//pkg/kubelet/kubeletconfig/badconfig:go_default_library", + "//pkg/kubelet/kubeletconfig/checkpoint:go_default_library", + "//pkg/kubelet/kubeletconfig/checkpoint/store:go_default_library", + "//pkg/kubelet/kubeletconfig/configfiles:go_default_library", + "//pkg/kubelet/kubeletconfig/startups:go_default_library", + "//pkg/kubelet/kubeletconfig/status:go_default_library", + "//pkg/kubelet/kubeletconfig/util/equal:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + "//pkg/kubelet/kubeletconfig/util/log:go_default_library", + "//pkg/kubelet/kubeletconfig/util/panic:go_default_library", + "//pkg/version:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/kubelet/kubeletconfig/badconfig:all-srcs", + "//pkg/kubelet/kubeletconfig/checkpoint:all-srcs", + "//pkg/kubelet/kubeletconfig/configfiles:all-srcs", + "//pkg/kubelet/kubeletconfig/startups:all-srcs", + "//pkg/kubelet/kubeletconfig/status:all-srcs", + "//pkg/kubelet/kubeletconfig/util/codec:all-srcs", + "//pkg/kubelet/kubeletconfig/util/equal:all-srcs", + "//pkg/kubelet/kubeletconfig/util/files:all-srcs", + "//pkg/kubelet/kubeletconfig/util/filesystem:all-srcs", + "//pkg/kubelet/kubeletconfig/util/log:all-srcs", + "//pkg/kubelet/kubeletconfig/util/panic:all-srcs", + "//pkg/kubelet/kubeletconfig/util/test:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/OWNERS b/pkg/kubelet/kubeletconfig/OWNERS new file mode 100644 index 0000000000..99d321aac1 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/OWNERS @@ -0,0 +1,5 @@ +approvers: +- mtaufen +- dchen1107 +reviewers: +- sig-node-reviewers diff --git a/pkg/kubelet/kubeletconfig/badconfig/BUILD 
b/pkg/kubelet/kubeletconfig/badconfig/BUILD new file mode 100644 index 0000000000..8feb477540 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/badconfig/BUILD @@ -0,0 +1,51 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "badconfig_test.go", + "fstracker_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/kubelet/kubeletconfig/util/files:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + "//pkg/kubelet/kubeletconfig/util/test:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "badconfig.go", + "fstracker.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/kubelet/kubeletconfig/util/files:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + "//pkg/kubelet/kubeletconfig/util/log:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/badconfig/badconfig.go b/pkg/kubelet/kubeletconfig/badconfig/badconfig.go new file mode 100644 index 0000000000..0374192695 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/badconfig/badconfig.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package badconfig + +import ( + "encoding/json" + "fmt" + "time" +) + +// Tracker tracks "bad" configurations in a storage layer +type Tracker interface { + // Initialize sets up the storage layer + Initialize() error + // MarkBad marks `uid` as a bad config and records `reason` as the reason for marking it bad + MarkBad(uid, reason string) error + // Entry returns the Entry for `uid` if it exists in the tracker, otherwise nil + Entry(uid string) (*Entry, error) +} + +// Entry describes when a configuration was marked bad and why +type Entry struct { + Time string `json:"time"` + Reason string `json:"reason"` +} + +// markBad makes an entry in `m` for the config with `uid` and reason `reason` +func markBad(m map[string]Entry, uid, reason string) { + now := time.Now() + entry := Entry{ + Time: now.Format(time.RFC3339), // use RFC3339 time format + Reason: reason, + } + m[uid] = entry +} + +// getEntry returns the Entry for `uid` in `m`, or nil if no such entry exists +func getEntry(m map[string]Entry, uid string) *Entry { + entry, ok := m[uid] + if ok { + return &entry + } + return nil +} + +// encode returns a []byte representation of `m`, for saving `m` to a storage layer +func encode(m map[string]Entry) ([]byte, error) { + data, err := json.Marshal(m) + if err != nil { + return nil, err + } + return data, nil +} + +// decode transforms a []byte into a `map[string]Entry`, or returns an error if it can't produce said map +// if `data` is empty, returns an empty map +func decode(data []byte) (map[string]Entry, error) { + // create the map + m := map[string]Entry{} + // if the data is empty, just return the empty map + if len(data) == 0 { + return m, nil + } + // otherwise unmarshal the json + if err := json.Unmarshal(data, &m); err != nil { + return nil, fmt.Errorf("failed to unmarshal, error: %v", err) + } + return m, nil +} diff --git a/pkg/kubelet/kubeletconfig/badconfig/badconfig_test.go b/pkg/kubelet/kubeletconfig/badconfig/badconfig_test.go new file mode 100644 index 0000000000..617219eeb6 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/badconfig/badconfig_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package badconfig + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func TestMarkBad(t *testing.T) { + // build a map with one entry + m := map[string]Entry{} + uid := "uid" + reason := "reason" + markBad(m, uid, reason) + + // the entry should exist for uid + entry, ok := m[uid] + if !ok { + t.Fatalf("expect entry for uid %q, but none exists", uid) + } + + // the entry's reason should match the reason it was marked bad with + if entry.Reason != reason { + t.Errorf("expect Entry.Reason %q, but got %q", reason, entry.Reason) + } + + // the entry's timestamp should be in RFC3339 format + if err := assertRFC3339(entry.Time); err != nil { + t.Errorf("expect Entry.Time to use RFC3339 format, but got %q, error: %v", entry.Time, err) + } + + // it should be the only entry in the map thus far + if n := len(m); n != 1 { + t.Errorf("expect one entry in the map, but got %d", n) + } + +} + +func TestGetEntry(t *testing.T) { + nowstamp := time.Now().Format(time.RFC3339) + uid := "uid" + expect := &Entry{ + Time: nowstamp, + Reason: "reason", + } + m := map[string]Entry{uid: *expect} + + // should return nil for entries that don't exist + bogus := "bogus-uid" + if e := getEntry(m, bogus); e != nil { + t.Errorf("expect nil for entries that don't exist (uid: %q), but got %#v", bogus, e) + } + + // should return non-nil for entries that exist + if e := getEntry(m, uid); e == nil { + t.Errorf("expect non-nil for entries that exist (uid: %q), but got nil", uid) + } else if !reflect.DeepEqual(expect, e) { + // entry should match what we inserted for the given UID + t.Errorf("expect entry for uid %q to match %#v, but got %#v", uid, expect, e) + } +} + +func TestEncode(t *testing.T) { + nowstamp := time.Now().Format(time.RFC3339) + uid := "uid" + expect := fmt.Sprintf(`{"%s":{"time":"%s","reason":"reason"}}`, uid, nowstamp) + m := map[string]Entry{uid: { + Time: nowstamp, + Reason: "reason", + }} + + data, err := encode(m) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + json := string(data) + + if json != expect { + t.Errorf("expect encoding of %#v to match %q, but got %q", m, expect, json) + } +} + +func TestDecode(t *testing.T) { + nowstamp := time.Now().Format(time.RFC3339) + uid := "uid" + valid := []byte(fmt.Sprintf(`{"%s":{"time":"%s","reason":"reason"}}`, uid, nowstamp)) + expect := map[string]Entry{uid: { + Time: nowstamp, + Reason: "reason", + }} + + // decoding valid json should result in an object with the correct values + if m, err := decode(valid); err != nil { + t.Errorf("expect decoding valid json %q to produce a map, but got error: %v", valid, err) + } else if !reflect.DeepEqual(expect, m) { + // m should equal expected decoded object + t.Errorf("expect decoding valid json %q to produce %#v, but got %#v", valid, expect, m) + } + + // decoding invalid json should return an error + invalid := []byte(`invalid`) + if m, err := decode(invalid); err == nil { + t.Errorf("expect decoding invalid json %q to return an error, but decoded to %#v", invalid, m) + } +} + +func TestRoundTrip(t *testing.T) { + nowstamp := time.Now().Format(time.RFC3339) + uid := "uid" + expect := map[string]Entry{uid: { + Time: nowstamp, + Reason: "reason", + }} + + // test that encoding and decoding an object results in the same value + data, err := encode(expect) + if err != nil { + t.Fatalf("failed to encode %#v, error: %v", expect, err) + } + after, err := decode(data) + if err != nil { + t.Fatalf("failed to decode %q, error: %v", string(data), err) + } + if !reflect.DeepEqual(expect, after) { 
+ t.Errorf("expect round-tripping %#v to result in the same value, but got %#v", expect, after) + } +} + +func assertRFC3339(s string) error { + tm, err := time.Parse(time.RFC3339, s) + if err != nil { + return fmt.Errorf("expect RFC3339 format, but failed to parse, error: %v", err) + } + // parsing succeeded, now finish round-trip and compare + rt := tm.Format(time.RFC3339) + if rt != s { + return fmt.Errorf("expect RFC3339 format, but failed to round trip unchanged, original %q, round-trip %q", s, rt) + } + return nil +} diff --git a/pkg/kubelet/kubeletconfig/badconfig/fstracker.go b/pkg/kubelet/kubeletconfig/badconfig/fstracker.go new file mode 100644 index 0000000000..a90ec26dd5 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/badconfig/fstracker.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package badconfig + +import ( + "path/filepath" + + utilfiles "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +const ( + badConfigsFile = "bad-configs.json" +) + +// fsTracker tracks bad config in the local filesystem +type fsTracker struct { + // fs is the filesystem to use for storage operations; can be mocked for testing + fs utilfs.Filesystem + // trackingDir is the absolute path to the storage directory for fsTracker + trackingDir string +} + +// NewFsTracker returns a new Tracker that will store information in the `trackingDir` +func NewFsTracker(fs utilfs.Filesystem, trackingDir string) Tracker { + return &fsTracker{ + fs: fs, + trackingDir: trackingDir, + } +} + +func (tracker *fsTracker) Initialize() error { + utillog.Infof("initializing bad config tracking directory %q", tracker.trackingDir) + if err := utilfiles.EnsureDir(tracker.fs, tracker.trackingDir); err != nil { + return err + } + if err := utilfiles.EnsureFile(tracker.fs, filepath.Join(tracker.trackingDir, badConfigsFile)); err != nil { + return err + } + return nil +} + +func (tracker *fsTracker) MarkBad(uid, reason string) error { + m, err := tracker.load() + if err != nil { + return err + } + // create the bad config entry in the map + markBad(m, uid, reason) + // save the file + if err := tracker.save(m); err != nil { + return err + } + return nil +} + +func (tracker *fsTracker) Entry(uid string) (*Entry, error) { + m, err := tracker.load() + if err != nil { + return nil, err + } + // return the entry, or nil if it doesn't exist + return getEntry(m, uid), nil +} + +// load loads the bad-config-tracking file from disk and decodes the map encoding it contains +func (tracker *fsTracker) load() (map[string]Entry, error) { + path := filepath.Join(tracker.trackingDir, badConfigsFile) + // load the file + data, err := tracker.fs.ReadFile(path) + if err != nil { + return nil, err + } + return decode(data) +} + +// save replaces the contents of the bad-config-tracking file with the encoding of `m` +func (tracker *fsTracker) save(m 
map[string]Entry) error { + // encode the map + data, err := encode(m) + if err != nil { + return err + } + // save the file + path := filepath.Join(tracker.trackingDir, badConfigsFile) + if err := utilfiles.ReplaceFile(tracker.fs, path, data); err != nil { + return err + } + return nil +} diff --git a/pkg/kubelet/kubeletconfig/badconfig/fstracker_test.go b/pkg/kubelet/kubeletconfig/badconfig/fstracker_test.go new file mode 100644 index 0000000000..25b234b18b --- /dev/null +++ b/pkg/kubelet/kubeletconfig/badconfig/fstracker_test.go @@ -0,0 +1,255 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package badconfig + +import ( + "fmt" + "path/filepath" + "reflect" + "testing" + "time" + + utilfiles "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" + utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" +) + +const testTrackingDir = "/test-tracking-dir" + +// TODO(mtaufen): this file reuses a lot of test code from badconfig_test.go, should consolidate + +func newInitializedFakeFsTracker() (*fsTracker, error) { + fs := utilfs.NewFakeFs() + tracker := NewFsTracker(fs, testTrackingDir) + if err := tracker.Initialize(); err != nil { + return nil, err + } + return tracker.(*fsTracker), nil +} + +func TestFsTrackerInitialize(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("fsTracker.Initialize() failed with error: %v", err) + } + + // check that testTrackingDir exists + _, err = tracker.fs.Stat(testTrackingDir) + if err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", testTrackingDir, err) + } + + // check that testTrackingDir contains the badConfigsFile + path := filepath.Join(testTrackingDir, badConfigsFile) + _, err = tracker.fs.Stat(path) + if err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", path, err) + } +} + +func TestFsTrackerMarkBad(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + // create a bad config entry in the fs + uid := "uid" + reason := "reason" + tracker.MarkBad(uid, reason) + + // load the map from the fs + m, err := tracker.load() + if err != nil { + t.Fatalf("failed to load bad-config data, error: %v", err) + } + + // the entry should exist for uid + entry, ok := m[uid] + if !ok { + t.Fatalf("expect entry for uid %q, but none exists", uid) + } + + // the entry's reason should match the reason it was marked bad with + if entry.Reason != reason { + t.Errorf("expect Entry.Reason %q, but got %q", reason, entry.Reason) + } + + // the entry's timestamp should be in RFC3339 format + if err := assertRFC3339(entry.Time); err != nil { + t.Errorf("expect Entry.Time to use RFC3339 format, but got %q, error: %v", entry.Time, err) + } + + // it should be the only entry in the map thus far + if n := len(m); n != 1 { + t.Errorf("expect one entry in the map, but got %d", n) + } +} + 
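(Aside, not part of the patch: the fsTracker exercised by these tests is the concrete Tracker the config controller is expected to use. A minimal usage sketch follows; the tracking directory, the UID value, and the utilfs.DefaultFs real-filesystem type are illustrative assumptions, since only NewFakeFs appears in this hunk.)

```go
// Sketch only: drive the Tracker added in badconfig.go against a real filesystem.
// utilfs.DefaultFs{} is assumed to exist alongside NewFakeFs(); directory and UID are made up.
package main

import (
	"fmt"
	"log"

	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/badconfig"
	utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem"
)

func main() {
	tracker := badconfig.NewFsTracker(utilfs.DefaultFs{}, "/var/lib/kubelet/bad-config-tracking")
	if err := tracker.Initialize(); err != nil {
		log.Fatalf("initialize failed: %v", err)
	}
	// Record why a checkpointed config was rejected, keyed by its UID.
	if err := tracker.MarkBad("example-configmap-uid", "failed validation"); err != nil {
		log.Fatalf("mark bad failed: %v", err)
	}
	// Later, consult the entry before trying that UID again.
	entry, err := tracker.Entry("example-configmap-uid")
	if err != nil {
		log.Fatalf("entry lookup failed: %v", err)
	}
	fmt.Printf("config marked bad at %s: %s\n", entry.Time, entry.Reason)
}
```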
+func TestFsTrackerEntry(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + // manually save a correct entry to fs + nowstamp := time.Now().Format(time.RFC3339) + uid := "uid" + expect := &Entry{ + Time: nowstamp, + Reason: "reason", + } + m := map[string]Entry{uid: *expect} + err = tracker.save(m) + if err != nil { + t.Fatalf("failed to save bad-config data, error: %v", err) + } + + // should return nil for entries that don't exist + bogus := "bogus-uid" + e, err := tracker.Entry(bogus) + if err != nil { + t.Errorf("expect nil for entries that don't exist (uid: %q), but got error: %v", bogus, err) + } else if e != nil { + t.Errorf("expect nil for entries that don't exist (uid: %q), but got %#v", bogus, e) + } + + // should return non-nil for entries that exist + e, err = tracker.Entry(uid) + if err != nil { + t.Errorf("expect non-nil for entries that exist (uid: %q), but got error: %v", uid, err) + } else if e == nil { + t.Errorf("expect non-nil for entries that exist (uid: %q), but got nil", uid) + } else if !reflect.DeepEqual(expect, e) { + // entry should match what we inserted for the given UID + t.Errorf("expect entry for uid %q to match %#v, but got %#v", uid, expect, e) + } +} + +// TODO(mtaufen): test loading invalid json (see startups/fstracker_test.go for example) +func TestFsTrackerLoad(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + uid := "uid" + nowstamp := time.Now().Format(time.RFC3339) + cases := []struct { + desc string + data []byte + expect map[string]Entry + err string + }{ + // empty file + {"empty file", []byte(""), map[string]Entry{}, ""}, + // empty map + {"empty map", []byte("{}"), map[string]Entry{}, ""}, + // valid json + {"valid json", []byte(fmt.Sprintf(`{"%s":{"time":"%s","reason":"reason"}}`, uid, nowstamp)), + map[string]Entry{uid: { + Time: nowstamp, + Reason: "reason", + }}, ""}, + // invalid json + {"invalid json", []byte(`*`), map[string]Entry{}, "failed to unmarshal"}, + } + + for _, c := range cases { + // save a file containing the correct serialization + utilfiles.ReplaceFile(tracker.fs, filepath.Join(testTrackingDir, badConfigsFile), c.data) + + // loading valid json should result in an object with the correct values + m, err := tracker.load() + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if !reflect.DeepEqual(c.expect, m) { + // m should equal expected decoded object + t.Errorf("case %q, expect %#v but got %#v", c.desc, c.expect, m) + } + } +} + +func TestFsTrackerSave(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + uid := "uid" + nowstamp := time.Now().Format(time.RFC3339) + cases := []struct { + desc string + m map[string]Entry + expect string + err string + }{ + // empty map + {"empty map", map[string]Entry{}, "{}", ""}, + // 1-entry map + {"1-entry map", + map[string]Entry{uid: { + Time: nowstamp, + Reason: "reason", + }}, + fmt.Sprintf(`{"%s":{"time":"%s","reason":"reason"}}`, uid, nowstamp), ""}, + } + + for _, c := range cases { + if err := tracker.save(c.m); utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + + data, err := tracker.fs.ReadFile(filepath.Join(testTrackingDir, badConfigsFile)) + if err != nil { + t.Fatalf("failed to read bad-config file, error: %v", err) + } + json := string(data) + + if json != 
c.expect { + t.Errorf("case %q, expect %q but got %q", c.desc, c.expect, json) + } + } +} + +func TestFsTrackerRoundTrip(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + nowstamp := time.Now().Format(time.RFC3339) + uid := "uid" + expect := map[string]Entry{uid: { + Time: nowstamp, + Reason: "reason", + }} + + // test that saving and loading an object results in the same value + err = tracker.save(expect) + if err != nil { + t.Fatalf("failed to save bad-config data, error: %v", err) + } + after, err := tracker.load() + if err != nil { + t.Fatalf("failed to load bad-config data, error: %v", err) + } + if !reflect.DeepEqual(expect, after) { + t.Errorf("expect round-tripping %#v to result in the same value, but got %#v", expect, after) + } +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/BUILD b/pkg/kubelet/kubeletconfig/checkpoint/BUILD new file mode 100644 index 0000000000..fe4b1dc1cc --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/BUILD @@ -0,0 +1,71 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "checkpoint_test.go", + "configmap_test.go", + "download_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/componentconfig/v1alpha1:go_default_library", + "//pkg/kubelet/kubeletconfig/util/codec:go_default_library", + "//pkg/kubelet/kubeletconfig/util/test:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "checkpoint.go", + "configmap.go", + "download.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//pkg/kubelet/kubeletconfig/util/codec:go_default_library", + "//pkg/kubelet/kubeletconfig/util/log:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/kubelet/kubeletconfig/checkpoint/store:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/checkpoint/checkpoint.go b/pkg/kubelet/kubeletconfig/checkpoint/checkpoint.go new file mode 100644 index 0000000000..20a8a853e7 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/checkpoint.go @@ -0,0 +1,71 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package checkpoint + +import ( + "fmt" + + apiv1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/componentconfig" +) + +// Checkpoint represents a local copy of a config source (payload) object +type Checkpoint interface { + // UID returns the UID of the config source object behind the Checkpoint + UID() string + // Parse parses the checkpoint into the internal KubeletConfiguration type + Parse() (*componentconfig.KubeletConfiguration, error) + // Encode returns a []byte representation of the config source object behind the Checkpoint + Encode() ([]byte, error) + + // object returns the underlying checkpointed object. If you want to compare sources for equality, use EqualCheckpoints, + // which compares the underlying checkpointed objects for semantic API equality. + object() interface{} +} + +// DecodeCheckpoint is a helper for using the apimachinery to decode serialized checkpoints +func DecodeCheckpoint(data []byte) (Checkpoint, error) { + // decode the checkpoint + obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) + if err != nil { + return nil, fmt.Errorf("failed to decode, error: %v", err) + } + + // TODO(mtaufen): for now we assume we are trying to load a ConfigMap checkpoint, may need to extend this if we allow other checkpoint types + + // convert it to the external ConfigMap type, so we're consistently working with the external type outside of the on-disk representation + cm := &apiv1.ConfigMap{} + err = api.Scheme.Convert(obj, cm, nil) + if err != nil { + return nil, fmt.Errorf("failed to convert decoded object into a v1 ConfigMap, error: %v", err) + } + return NewConfigMapCheckpoint(cm) +} + +// EqualCheckpoints compares two Checkpoints for equality, if their underlying objects are equal, so are the Checkpoints +func EqualCheckpoints(a, b Checkpoint) bool { + if a != nil && b != nil { + return apiequality.Semantic.DeepEqual(a.object(), b.object()) + } + if a == nil && b == nil { + return true + } + return false +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/checkpoint_test.go b/pkg/kubelet/kubeletconfig/checkpoint/checkpoint_test.go new file mode 100644 index 0000000000..d9cc259524 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/checkpoint_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package checkpoint + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + + apiv1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec" + utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" +) + +// newUnsupportedEncoded returns an encoding of an object that does not have a Checkpoint implementation +func newUnsupportedEncoded(t *testing.T) []byte { + encoder, err := utilcodec.NewJSONEncoder(apiv1.GroupName) + if err != nil { + t.Fatalf("could not create an encoder, error: %v", err) + } + unsupported := &apiv1.Node{} + data, err := runtime.Encode(encoder, unsupported) + if err != nil { + t.Fatalf("could not encode object, error: %v", err) + } + return data +} + +func TestDecodeCheckpoint(t *testing.T) { + // generate correct Checkpoint for v1/ConfigMap test case + cm, err := NewConfigMapCheckpoint(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: types.UID("uid")}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // generate unsupported object encoding for unsupported type test case + unsupported := newUnsupportedEncoded(t) + + // test cases + cases := []struct { + desc string + data []byte + expect Checkpoint // expect a deeply-equal Checkpoint to be returned from Decode + err string // expect error to contain this substring + }{ + // v1/ConfigMap + {"v1/ConfigMap", []byte(`{"apiVersion": "v1","kind": "ConfigMap","metadata": {"uid": "uid"}}`), cm, ""}, + // malformed + {"malformed", []byte("malformed"), nil, "failed to decode"}, + // no UID + {"no UID", []byte(`{"apiVersion": "v1","kind": "ConfigMap"}`), nil, "ConfigMap must have a UID"}, + // well-formed, but unsupported type + {"well-formed, but unsupported encoded type", unsupported, nil, "failed to convert"}, + } + + for _, c := range cases { + cpt, err := DecodeCheckpoint(c.data) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + // Unfortunately reflect.DeepEqual treats nil data structures as != empty data structures, so + // we have to settle for semantic equality of the underlying checkpointed API objects. + // If additional fields are added to the object that implements the Checkpoint interface, + // they should be added to a named sub-object to facilitate a DeepEquals comparison + // of the extra fields. + // decoded checkpoint should match expected checkpoint + if !apiequality.Semantic.DeepEqual(cpt.object(), c.expect.object()) { + t.Errorf("case %q, expect checkpoint %s but got %s", c.desc, spew.Sdump(c.expect), spew.Sdump(cpt)) + } + } +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/configmap.go b/pkg/kubelet/kubeletconfig/checkpoint/configmap.go new file mode 100644 index 0000000000..ff26005b21 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/configmap.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
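(Aside, not part of the patch: DecodeCheckpoint and Parse above form the read path a store uses when loading a checkpoint back from disk. A rough sketch under the assumption that the bytes were produced by Checkpoint.Encode; the ConfigMap payload below is illustrative.)

```go
// Sketch only: decode checkpoint bytes back into a Checkpoint, then parse the
// embedded "kubelet" payload into the internal KubeletConfiguration type.
package main

import (
	"fmt"
	"log"

	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
)

func main() {
	// Illustrative bytes, shaped like the Encode output of a ConfigMap-backed checkpoint.
	data := []byte(`{"kind":"ConfigMap","apiVersion":"v1","metadata":{"uid":"uid"},"data":{"kubelet":"{\"kind\":\"KubeletConfiguration\",\"apiVersion\":\"componentconfig/v1alpha1\"}"}}`)

	cpt, err := checkpoint.DecodeCheckpoint(data)
	if err != nil {
		log.Fatalf("decode failed: %v", err)
	}
	kc, err := cpt.Parse()
	if err != nil {
		log.Fatalf("parse failed: %v", err)
	}
	// An empty-but-valid payload parses to the registered defaults for that version.
	fmt.Printf("checkpoint %s parsed, sync frequency %v\n", cpt.UID(), kc.SyncFrequency.Duration)
}
```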
+*/ + +package checkpoint + +import ( + "fmt" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/apis/componentconfig" + utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec" +) + +const configMapConfigKey = "kubelet" + +// configMapCheckpoint implements Checkpoint, backed by a v1/ConfigMap config source object +type configMapCheckpoint struct { + configMap *apiv1.ConfigMap +} + +// NewConfigMapCheckpoint returns a Checkpoint backed by `cm`. `cm` must be non-nil +// and have a non-empty ObjectMeta.UID, or an error will be returned. +func NewConfigMapCheckpoint(cm *apiv1.ConfigMap) (Checkpoint, error) { + if cm == nil { + return nil, fmt.Errorf("ConfigMap must be non-nil to be treated as a Checkpoint") + } else if len(cm.ObjectMeta.UID) == 0 { + return nil, fmt.Errorf("ConfigMap must have a UID to be treated as a Checkpoint") + } + return &configMapCheckpoint{cm}, nil +} + +// UID returns the UID of a configMapCheckpoint +func (c *configMapCheckpoint) UID() string { + return string(c.configMap.UID) +} + +// implements Parse for v1/ConfigMap checkpoints +func (c *configMapCheckpoint) Parse() (*componentconfig.KubeletConfiguration, error) { + const emptyCfgErr = "config was empty, but some parameters are required" + + cm := c.configMap + + if len(cm.Data) == 0 { + return nil, fmt.Errorf(emptyCfgErr) + } + + // TODO(mtaufen): Once the KubeletConfiguration type is decomposed, extend this to a key for each sub-object + config, ok := cm.Data[configMapConfigKey] + if !ok { + return nil, fmt.Errorf("key %q not found in ConfigMap", configMapConfigKey) + } else if len(config) == 0 { + return nil, fmt.Errorf(emptyCfgErr) + } + + return utilcodec.DecodeKubeletConfiguration([]byte(config)) +} + +// Encode encodes a configMapCheckpoint +func (c *configMapCheckpoint) Encode() ([]byte, error) { + cm := c.configMap + encoder, err := utilcodec.NewJSONEncoder(apiv1.GroupName) + if err != nil { + return nil, err + } + data, err := runtime.Encode(encoder, cm) + if err != nil { + return nil, err + } + return data, nil +} + +func (c *configMapCheckpoint) object() interface{} { + return c.configMap +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/configmap_test.go b/pkg/kubelet/kubeletconfig/checkpoint/configmap_test.go new file mode 100644 index 0000000000..398e7cf992 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/configmap_test.go @@ -0,0 +1,216 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
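(Aside, not part of the patch: the write side of the same round trip, wrapping a downloaded ConfigMap and producing the bytes a store would persist. The helper name is made up for illustration.)

```go
// Sketch only: wrap a ConfigMap in a checkpoint and encode it for storage.
package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
)

// encodeForDisk is a hypothetical helper; the real persistence lives in the store packages.
func encodeForDisk(cm *apiv1.ConfigMap) ([]byte, error) {
	cpt, err := checkpoint.NewConfigMapCheckpoint(cm) // fails if cm is nil or has no UID
	if err != nil {
		return nil, err
	}
	return cpt.Encode()
}
```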
+*/ + +package checkpoint + +import ( + "fmt" + "testing" + + "github.com/davecgh/go-spew/spew" + + apiv1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/componentconfig" + ccv1a1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" + utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" +) + +func TestNewConfigMapCheckpoint(t *testing.T) { + cases := []struct { + desc string + cm *apiv1.ConfigMap + err string + }{ + {"nil v1/ConfigMap", nil, "must be non-nil"}, + {"empty v1/ConfigMap", &apiv1.ConfigMap{}, "must have a UID"}, + {"populated v1/ConfigMap", + &apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + UID: types.UID("uid"), + }, + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, ""}, + } + + for _, c := range cases { + cpt, err := NewConfigMapCheckpoint(c.cm) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + // underlying object should match the object passed in + if !apiequality.Semantic.DeepEqual(cpt.object(), c.cm) { + t.Errorf("case %q, expect Checkpoint %s but got %s", c.desc, spew.Sdump(c.cm), spew.Sdump(cpt)) + } + } +} + +func TestConfigMapCheckpointUID(t *testing.T) { + cases := []string{"", "uid", "376dfb73-56db-11e7-a01e-42010a800002"} + for _, uidIn := range cases { + cpt := &configMapCheckpoint{ + &apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{UID: types.UID(uidIn)}, + }, + } + // UID method should return the correct value of the UID + uidOut := cpt.UID() + if uidIn != uidOut { + t.Errorf("expect UID() to return %q, but got %q", uidIn, uidOut) + } + } +} + +func TestConfigMapCheckpointParse(t *testing.T) { + // get the built-in default configuration + external := &ccv1a1.KubeletConfiguration{} + api.Scheme.Default(external) + defaultConfig := &componentconfig.KubeletConfiguration{} + err := api.Scheme.Convert(external, defaultConfig, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + cases := []struct { + desc string + cm *apiv1.ConfigMap + expect *componentconfig.KubeletConfiguration + err string + }{ + {"empty data", &apiv1.ConfigMap{}, nil, "config was empty"}, + // missing kubelet key + {"missing kubelet key", &apiv1.ConfigMap{Data: map[string]string{ + "bogus": "stuff"}}, nil, fmt.Sprintf("key %q not found", configMapConfigKey)}, + // invalid format + {"invalid yaml", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": "*"}}, nil, "failed to decode"}, + {"invalid json", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": "{*"}}, nil, "failed to decode"}, + // invalid object + {"missing kind", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": `{"apiVersion":"componentconfig/v1alpha1"}`}}, nil, "failed to decode"}, + {"missing version", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": `{"kind":"KubeletConfiguration"}`}}, nil, "failed to decode"}, + {"unregistered kind", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": `{"kind":"BogusKind","apiVersion":"componentconfig/v1alpha1"}`}}, nil, "failed to decode"}, + {"unregistered version", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": `{"kind":"KubeletConfiguration","apiVersion":"bogusversion"}`}}, nil, "failed to decode"}, + // empty object with correct kind and version should result in the defaults for that kind and version + {"default from yaml", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": `kind: KubeletConfiguration 
+apiVersion: componentconfig/v1alpha1`}}, defaultConfig, ""}, + {"default from json", &apiv1.ConfigMap{Data: map[string]string{ + "kubelet": `{"kind":"KubeletConfiguration","apiVersion":"componentconfig/v1alpha1"}`}}, defaultConfig, ""}, + } + for _, c := range cases { + cpt := &configMapCheckpoint{c.cm} + kc, err := cpt.Parse() + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + // we expect the parsed configuration to match what we described in the ConfigMap + if !apiequality.Semantic.DeepEqual(c.expect, kc) { + t.Errorf("case %q, expect config %s but got %s", c.desc, spew.Sdump(c.expect), spew.Sdump(kc)) + } + } +} + +func TestConfigMapCheckpointEncode(t *testing.T) { + // only one case, based on output from the existing encoder, and since + // this is hard to test (key order isn't guaranteed), we should probably + // just stick to this test case and mostly rely on the round-trip test. + cases := []struct { + desc string + cpt *configMapCheckpoint + expect string + }{ + // we expect Checkpoints to be encoded as a json representation of the underlying API object + {"one-key", + &configMapCheckpoint{&apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "one-key"}, + Data: map[string]string{"one": ""}}}, + `{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"one-key","creationTimestamp":null},"data":{"one":""}} +`}, + } + + for _, c := range cases { + data, err := c.cpt.Encode() + // we don't expect any errors from encoding + if utiltest.SkipRest(t, c.desc, err, "") { + continue + } + if string(data) != c.expect { + t.Errorf("case %q, expect encoding %q but got %q", c.desc, c.expect, string(data)) + } + } +} + +func TestConfigMapCheckpointRoundTrip(t *testing.T) { + cases := []struct { + desc string + cpt *configMapCheckpoint + decodeErr string + }{ + // empty data + {"empty data", + &configMapCheckpoint{&apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-data-sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + UID: "uid", + }, + Data: map[string]string{}}}, + ""}, + // two keys + {"two keys", + &configMapCheckpoint{&apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "two-keys-sha256-2bff03d6249c8a9dc9a1436d087c124741361ccfac6615b81b67afcff5c42431", + UID: "uid", + }, + Data: map[string]string{"one": "", "two": "2"}}}, + ""}, + // missing uid + {"missing uid", + &configMapCheckpoint{&apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "two-keys-sha256-2bff03d6249c8a9dc9a1436d087c124741361ccfac6615b81b67afcff5c42431", + UID: "", + }, + Data: map[string]string{"one": "", "two": "2"}}}, + "must have a UID"}, + } + for _, c := range cases { + // we don't expect any errors from encoding + data, err := c.cpt.Encode() + if utiltest.SkipRest(t, c.desc, err, "") { + continue + } + after, err := DecodeCheckpoint(data) + if utiltest.SkipRest(t, c.desc, err, c.decodeErr) { + continue + } + if !apiequality.Semantic.DeepEqual(c.cpt.object(), after.object()) { + t.Errorf("case %q, expect round-trip result %s but got %s", c.desc, spew.Sdump(c.cpt), spew.Sdump(after)) + } + } +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download.go b/pkg/kubelet/kubeletconfig/checkpoint/download.go new file mode 100644 index 0000000000..b61b394ab4 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -0,0 +1,152 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package checkpoint + +import ( + "fmt" + + apiv1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/api" + utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +// RemoteConfigSource represents a remote config source object that can be downloaded as a Checkpoint +type RemoteConfigSource interface { + // UID returns the UID of the remote config source object + UID() string + // Download downloads the remote config source object and returns a Checkpoint backed by the object, + // or a sanitized failure reason and error if the download fails + Download(client clientset.Interface) (Checkpoint, string, error) + // Encode returns a []byte representation of the object behind the RemoteConfigSource + Encode() ([]byte, error) + + // object returns the underlying source object. If you want to compare sources for equality, use EqualRemoteConfigSources, + // which compares the underlying source objects for semantic API equality. + object() interface{} +} + +// NewRemoteConfigSource constructs a RemoteConfigSource from a v1/NodeConfigSource object, or returns +// a sanitized failure reason and an error if the `source` is blatantly invalid. +// You should only call this with a non-nil config source. +func NewRemoteConfigSource(source *apiv1.NodeConfigSource) (RemoteConfigSource, string, error) { + // exactly one subfield of the config source must be non-nil; today ConfigMapRef is the only reference + if source.ConfigMapRef == nil { + reason := "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil" + return nil, reason, fmt.Errorf("%s, NodeConfigSource was: %#v", reason, source) + } + + // validate the NodeConfigSource: + + // at this point we know we're using the ConfigMapRef subfield + ref := source.ConfigMapRef + + // name, namespace, and UID must all be non-empty for ConfigMapRef + if ref.Name == "" || ref.Namespace == "" || string(ref.UID) == "" { + reason := "invalid ObjectReference, all of UID, Name, and Namespace must be specified" + return nil, reason, fmt.Errorf("%s, ObjectReference was: %#v", reason, ref) + } + + return &remoteConfigMap{source}, "", nil +} + +// DecodeRemoteConfigSource is a helper for using the apimachinery to decode serialized RemoteConfigSources; +// e.g. the objects stored in the .cur and .lkg files by checkpoint/store/fsstore.go +func DecodeRemoteConfigSource(data []byte) (RemoteConfigSource, error) { + // decode the remote config source + obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) + if err != nil { + return nil, fmt.Errorf("failed to decode, error: %v", err) + } + + // for now we assume we are trying to load an apiv1.NodeConfigSource, + // this may need to be extended if e.g.
a new version of the api is born + + // convert it to the external NodeConfigSource type, so we're consistently working with the external type outside of the on-disk representation + cs := &apiv1.NodeConfigSource{} + err = api.Scheme.Convert(obj, cs, nil) + if err != nil { + return nil, fmt.Errorf("failed to convert decoded object into a v1 NodeConfigSource, error: %v", err) + } + source, _, err := NewRemoteConfigSource(cs) + return source, err +} + +// EqualRemoteConfigSources is a helper for comparing remote config sources by +// comparing the underlying API objects for semantic equality. +func EqualRemoteConfigSources(a, b RemoteConfigSource) bool { + if a != nil && b != nil { + return apiequality.Semantic.DeepEqual(a.object(), b.object()) + } + if a == nil && b == nil { + return true + } + return false +} + +// remoteConfigMap implements RemoteConfigSource for v1/ConfigMap config sources +type remoteConfigMap struct { + source *apiv1.NodeConfigSource +} + +func (r *remoteConfigMap) UID() string { + return string(r.source.ConfigMapRef.UID) +} + +func (r *remoteConfigMap) Download(client clientset.Interface) (Checkpoint, string, error) { + var reason string + + uid := string(r.source.ConfigMapRef.UID) + + utillog.Infof("attempting to download ConfigMap with UID %q", uid) + + // get the ConfigMap via namespace/name, there doesn't seem to be a way to get it by UID + cm, err := client.CoreV1().ConfigMaps(r.source.ConfigMapRef.Namespace).Get(r.source.ConfigMapRef.Name, metav1.GetOptions{}) + if err != nil { + reason = fmt.Sprintf("could not download ConfigMap with name %q from namespace %q", r.source.ConfigMapRef.Name, r.source.ConfigMapRef.Namespace) + return nil, reason, fmt.Errorf("%s, error: %v", reason, err) + } + + // ensure that UID matches the UID on the reference, the ObjectReference must be unambiguous + if r.source.ConfigMapRef.UID != cm.UID { + reason = fmt.Sprintf("invalid ObjectReference, UID %q does not match UID of downloaded ConfigMap %q", r.source.ConfigMapRef.UID, cm.UID) + return nil, reason, fmt.Errorf(reason) + } + + utillog.Infof("successfully downloaded ConfigMap with UID %q", uid) + return &configMapCheckpoint{cm}, "", nil +} + +func (r *remoteConfigMap) Encode() ([]byte, error) { + encoder, err := utilcodec.NewJSONEncoder(apiv1.GroupName) + if err != nil { + return nil, err + } + data, err := runtime.Encode(encoder, r.source) + if err != nil { + return nil, err + } + return data, nil +} + +func (r *remoteConfigMap) object() interface{} { + return r.source +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download_test.go b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go new file mode 100644 index 0000000000..496353919f --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go @@ -0,0 +1,134 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
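(Aside, not part of the patch: how a caller is expected to go from the Node's config source reference to a downloaded checkpoint, per download.go above. The namespace, name, and UID are illustrative; the sanitized reason string travels with the error so it can be reported in status.)

```go
// Sketch only: build a RemoteConfigSource from a NodeConfigSource and download it.
package example

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
)

func downloadAssigned(client clientset.Interface) (checkpoint.Checkpoint, error) {
	// Values here are illustrative; in practice they come from the Node object.
	src, reason, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{
		ConfigMapRef: &apiv1.ObjectReference{Namespace: "kube-system", Name: "node-config", UID: "uid-from-api-server"},
	})
	if err != nil {
		return nil, fmt.Errorf("%s, error: %v", reason, err)
	}
	// Download fetches the ConfigMap by namespace/name and verifies the UID matches the reference.
	cpt, reason, err := src.Download(client)
	if err != nil {
		return nil, fmt.Errorf("%s, error: %v", reason, err)
	}
	return cpt, nil
}
```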
+*/ + +package checkpoint + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + + apiv1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + fakeclient "k8s.io/client-go/kubernetes/fake" + utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" +) + +func TestNewRemoteConfigSource(t *testing.T) { + cases := []struct { + desc string + source *apiv1.NodeConfigSource + expect RemoteConfigSource + err string + }{ + // all NodeConfigSource subfields nil + {"all NodeConfigSource subfields nil", + &apiv1.NodeConfigSource{}, nil, "exactly one subfield must be non-nil"}, + {"ConfigMapRef: empty name, namespace, and UID", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{}}, nil, "invalid ObjectReference"}, + // ConfigMapRef: empty name and namespace + {"ConfigMapRef: empty name and namespace", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{UID: "uid"}}, nil, "invalid ObjectReference"}, + // ConfigMapRef: empty name and UID + {"ConfigMapRef: empty name and UID", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Namespace: "namespace"}}, nil, "invalid ObjectReference"}, + // ConfigMapRef: empty namespace and UID + {"ConfigMapRef: empty namespace and UID", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name"}}, nil, "invalid ObjectReference"}, + // ConfigMapRef: empty UID + {"ConfigMapRef: empty namespace and UID", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace"}}, nil, "invalid ObjectReference"}, + // ConfigMapRef: empty namespace + {"ConfigMapRef: empty namespace and UID", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", UID: "uid"}}, nil, "invalid ObjectReference"}, + // ConfigMapRef: empty name + {"ConfigMapRef: empty namespace and UID", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Namespace: "namespace", UID: "uid"}}, nil, "invalid ObjectReference"}, + // ConfigMapRef: valid reference + {"ConfigMapRef: valid reference", + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}, + &remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}}, ""}, + } + + for _, c := range cases { + src, _, err := NewRemoteConfigSource(c.source) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + // underlying object should match the object passed in + if !apiequality.Semantic.DeepEqual(c.expect.object(), src.object()) { + t.Errorf("case %q, expect RemoteConfigSource %s but got %s", c.desc, spew.Sdump(c.expect), spew.Sdump(src)) + } + } +} + +func TestRemoteConfigMapUID(t *testing.T) { + cases := []string{"", "uid", "376dfb73-56db-11e7-a01e-42010a800002"} + for _, uidIn := range cases { + cpt := &remoteConfigMap{ + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: types.UID(uidIn)}}, + } + // UID method should return the correct value of the UID + uidOut := cpt.UID() + if uidIn != uidOut { + t.Errorf("expect UID() to return %q, but got %q", uidIn, uidOut) + } + } +} + +func TestRemoteConfigMapDownload(t *testing.T) { + cm := &apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + UID: "uid", + }} + client := fakeclient.NewSimpleClientset(cm) + + cases := []struct { + desc string + source RemoteConfigSource 
+ expect Checkpoint + err string + }{ + + // object doesn't exist + {"object doesn't exist", + &remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "bogus", Namespace: "namespace", UID: "bogus"}}}, + nil, "could not download ConfigMap"}, + // UID of downloaded object doesn't match UID of referent found via namespace/name + {"UID is incorrect for namespace/name", + &remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "bogus"}}}, + nil, "does not match UID"}, + // successful download + {"object exists and reference is correct", + &remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}}, + &configMapCheckpoint{cm}, ""}, + } + + for _, c := range cases { + cpt, _, err := c.source.Download(client) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + // "downloaded" object should match the expected + if !apiequality.Semantic.DeepEqual(c.expect.object(), cpt.object()) { + t.Errorf("case %q, expect Checkpoint %s but got %s", c.desc, spew.Sdump(c.expect), spew.Sdump(cpt)) + } + } +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD b/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD new file mode 100644 index 0000000000..47f2c00a2f --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD @@ -0,0 +1,58 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "fsstore_test.go", + "store_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/kubelet/kubeletconfig/checkpoint:go_default_library", + "//pkg/kubelet/kubeletconfig/util/files:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + "//pkg/kubelet/kubeletconfig/util/test:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "fakestore.go", + "fsstore.go", + "store.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/kubelet/kubeletconfig/checkpoint:go_default_library", + "//pkg/kubelet/kubeletconfig/util/files:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + "//pkg/kubelet/kubeletconfig/util/log:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/fakestore.go b/pkg/kubelet/kubeletconfig/checkpoint/store/fakestore.go new file mode 100644 index 0000000000..cb5d2491b2 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/fakestore.go @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "fmt" + "time" + + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" +) + +// so far only implements Current(), LastKnownGood(), SetCurrent(), and SetLastKnownGood() +type fakeStore struct { + current checkpoint.RemoteConfigSource + lastKnownGood checkpoint.RemoteConfigSource +} + +func (s *fakeStore) Initialize() error { + return fmt.Errorf("Initialize method not supported") +} + +func (s *fakeStore) Exists(uid string) (bool, error) { + return false, fmt.Errorf("Exists method not supported") +} + +func (s *fakeStore) Save(c checkpoint.Checkpoint) error { + return fmt.Errorf("Save method not supported") +} + +func (s *fakeStore) Load(uid string) (checkpoint.Checkpoint, error) { + return nil, fmt.Errorf("Load method not supported") +} + +func (s *fakeStore) CurrentModified() (time.Time, error) { + return time.Time{}, fmt.Errorf("CurrentModified method not supported") +} + +func (s *fakeStore) Current() (checkpoint.RemoteConfigSource, error) { + return s.current, nil +} + +func (s *fakeStore) LastKnownGood() (checkpoint.RemoteConfigSource, error) { + return s.lastKnownGood, nil +} + +func (s *fakeStore) SetCurrent(source checkpoint.RemoteConfigSource) error { + s.current = source + return nil +} + +func (s *fakeStore) SetCurrentUpdated(source checkpoint.RemoteConfigSource) (bool, error) { + return setCurrentUpdated(s, source) +} + +func (s *fakeStore) SetLastKnownGood(source checkpoint.RemoteConfigSource) error { + s.lastKnownGood = source + return nil +} + +func (s *fakeStore) Reset() (bool, error) { + return false, fmt.Errorf("Reset method not supported") +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go new file mode 100644 index 0000000000..710683fdf2 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go @@ -0,0 +1,163 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package store + +import ( + "fmt" + "path/filepath" + "time" + + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" + utilfiles "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +const ( + curFile = ".cur" + lkgFile = ".lkg" +) + +// fsStore is for tracking checkpoints in the local filesystem, implements Store +type fsStore struct { + // fs is the filesystem to use for storage operations; can be mocked for testing + fs utilfs.Filesystem + // checkpointsDir is the absolute path to the storage directory for fsStore + checkpointsDir string +} + +// NewFsStore returns a Store that saves its data in `checkpointsDir` +func NewFsStore(fs utilfs.Filesystem, checkpointsDir string) Store { + return &fsStore{ + fs: fs, + checkpointsDir: checkpointsDir, + } +} + +func (s *fsStore) Initialize() error { + utillog.Infof("initializing config checkpoints directory %q", s.checkpointsDir) + if err := utilfiles.EnsureDir(s.fs, s.checkpointsDir); err != nil { + return err + } + if err := utilfiles.EnsureFile(s.fs, filepath.Join(s.checkpointsDir, curFile)); err != nil { + return err + } + if err := utilfiles.EnsureFile(s.fs, filepath.Join(s.checkpointsDir, lkgFile)); err != nil { + return err + } + return nil +} + +func (s *fsStore) Exists(uid string) (bool, error) { + ok, err := utilfiles.FileExists(s.fs, filepath.Join(s.checkpointsDir, uid)) + if err != nil { + return false, fmt.Errorf("failed to determine whether checkpoint %q exists, error: %v", uid, err) + } + return ok, nil +} + +func (s *fsStore) Save(c checkpoint.Checkpoint) error { + // encode the checkpoint + data, err := c.Encode() + if err != nil { + return err + } + // save the file + if err := utilfiles.ReplaceFile(s.fs, filepath.Join(s.checkpointsDir, c.UID()), data); err != nil { + return err + } + return nil +} + +func (s *fsStore) Load(uid string) (checkpoint.Checkpoint, error) { + filePath := filepath.Join(s.checkpointsDir, uid) + utillog.Infof("loading configuration from %q", filePath) + + // load the file + data, err := s.fs.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read checkpoint file %q, error: %v", filePath, err) + } + + // decode it + c, err := checkpoint.DecodeCheckpoint(data) + if err != nil { + return nil, fmt.Errorf("failed to decode checkpoint file %q, error: %v", filePath, err) + } + return c, nil +} + +func (s *fsStore) CurrentModified() (time.Time, error) { + path := filepath.Join(s.checkpointsDir, curFile) + info, err := s.fs.Stat(path) + if err != nil { + return time.Time{}, fmt.Errorf("failed to stat %q while checking modification time, error: %v", path, err) + } + return info.ModTime(), nil +} + +func (s *fsStore) Current() (checkpoint.RemoteConfigSource, error) { + return s.sourceFromFile(curFile) +} + +func (s *fsStore) LastKnownGood() (checkpoint.RemoteConfigSource, error) { + return s.sourceFromFile(lkgFile) +} + +func (s *fsStore) SetCurrent(source checkpoint.RemoteConfigSource) error { + return s.setSourceFile(curFile, source) +} + +func (s *fsStore) SetCurrentUpdated(source checkpoint.RemoteConfigSource) (bool, error) { + return setCurrentUpdated(s, source) +} + +func (s *fsStore) SetLastKnownGood(source checkpoint.RemoteConfigSource) error { + return s.setSourceFile(lkgFile, source) +} + +func (s *fsStore) Reset() (bool, error) { + return reset(s) +} + +// sourceFromFile returns the RemoteConfigSource stored in the file at 
`s.checkpointsDir/relPath`, +// or nil if the file is empty +func (s *fsStore) sourceFromFile(relPath string) (checkpoint.RemoteConfigSource, error) { + path := filepath.Join(s.checkpointsDir, relPath) + data, err := s.fs.ReadFile(path) + if err != nil { + return nil, err + } else if len(data) == 0 { + return nil, nil + } + return checkpoint.DecodeRemoteConfigSource(data) +} + +// set source file replaces the file at `s.checkpointsDir/relPath` with a file containing `source` +func (s *fsStore) setSourceFile(relPath string, source checkpoint.RemoteConfigSource) error { + path := filepath.Join(s.checkpointsDir, relPath) + // if nil, reset the file + if source == nil { + return utilfiles.ReplaceFile(s.fs, path, []byte{}) + } + // encode the source and save it to the file + data, err := source.Encode() + if err != nil { + return err + } + return utilfiles.ReplaceFile(s.fs, path, data) +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore_test.go b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore_test.go new file mode 100644 index 0000000000..09c743dc37 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore_test.go @@ -0,0 +1,628 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" + utilfiles "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" + utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" +) + +const testCheckpointsDir = "/test-checkpoints-dir" + +func newInitializedFakeFsStore() (*fsStore, error) { + fs := utilfs.NewFakeFs() + store := NewFsStore(fs, testCheckpointsDir) + if err := store.Initialize(); err != nil { + return nil, err + } + return store.(*fsStore), nil +} + +func TestFsStoreInitialize(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("fsStore.Initialize() failed with error: %v", err) + } + + // check that testCheckpointsDir exists + _, err = store.fs.Stat(testCheckpointsDir) + if err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", testCheckpointsDir, err) + } + + // check that testCheckpointsDir contains the curFile + curPath := filepath.Join(testCheckpointsDir, curFile) + _, err = store.fs.Stat(curPath) + if err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", curPath, err) + } + + // check that testCheckpointsDir contains the lkgFile + lkgPath := filepath.Join(testCheckpointsDir, lkgFile) + _, err = store.fs.Stat(lkgPath) + if err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", lkgPath, err) + } +} + +func TestFsStoreExists(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a 
store, error: %v", err) + } + + // create a checkpoint file; this is enough for an exists check + cpt, err := checkpoint.NewConfigMapCheckpoint(&apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{UID: "uid"}, + }) + if err != nil { + t.Fatalf("could not construct checkpoint, error: %v", err) + } + saveTestCheckpointFile(t, store.fs, cpt) + + cases := []struct { + desc string + uid string // the uid to test + expect bool + err string + }{ + {"exists", "uid", true, ""}, + {"does not exist", "bogus-uid", false, ""}, + } + + for _, c := range cases { + ok, err := store.Exists(c.uid) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if c.expect != ok { + t.Errorf("case %q, expect %t but got %t", c.desc, c.expect, ok) + } + } +} + +func TestFsStoreSave(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + cpt, err := checkpoint.NewConfigMapCheckpoint(&apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{UID: "uid"}, + }) + if err != nil { + t.Fatalf("could not construct checkpoint, error: %v", err) + } + + // save the checkpoint + err = store.Save(cpt) + if err != nil { + t.Fatalf("unable to save checkpoint, error: %v", err) + } + + // expect the saved checkpoint file to match the encoding of the checkpoint + data, err := cpt.Encode() + if err != nil { + t.Fatalf("unable to encode the checkpoint, error: %v", err) + } + expect := string(data) + + data = readTestCheckpointFile(t, store.fs, cpt.UID()) + cptFile := string(data) + + if expect != cptFile { + t.Errorf("expect %q but got %q", expect, cptFile) + } +} + +func TestFsStoreLoad(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + const uid = "uid" + cpt, err := checkpoint.NewConfigMapCheckpoint(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: types.UID(uid)}}) + if err != nil { + t.Fatalf("unable to construct checkpoint, error: %v", err) + } + + cases := []struct { + desc string + loadUID string + cpt checkpoint.Checkpoint + err string + }{ + {"checkpoint exists", uid, cpt, ""}, + {"checkpoint does not exist", "bogus-uid", nil, "failed to read"}, + } + for _, c := range cases { + if c.cpt != nil { + saveTestCheckpointFile(t, store.fs, c.cpt) + } + cpt, err := store.Load(c.loadUID) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if !checkpoint.EqualCheckpoints(c.cpt, cpt) { + t.Errorf("case %q, expect %q but got %q", c.desc, spew.Sdump(c.cpt), spew.Sdump(cpt)) + } + } +} + +func TestFsStoreRoundTrip(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + const uid = "uid" + cpt, err := checkpoint.NewConfigMapCheckpoint(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: types.UID(uid)}}) + if err != nil { + t.Fatalf("unable to construct checkpoint, error: %v", err) + } + err = store.Save(cpt) + if err != nil { + t.Fatalf("unable to save checkpoint, error: %v", err) + } + cptAfter, err := store.Load(uid) + if err != nil { + t.Fatalf("unable to load checkpoint, error: %v", err) + } + if !checkpoint.EqualCheckpoints(cpt, cptAfter) { + t.Errorf("expect %q but got %q", spew.Sdump(cpt), spew.Sdump(cptAfter)) + } +} + +func TestFsStoreCurrentModified(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + // create an empty current file, this is good enough for testing + 
saveTestSourceFile(t, store.fs, curFile, nil) + + // set the timestamps to the current time, so we can compare to result of store.SetCurrentModified + now := time.Now() + err = store.fs.Chtimes(filepath.Join(testCheckpointsDir, curFile), now, now) + if err != nil { + t.Fatalf("could not change timestamps, error: %v", err) + } + + // for now we hope that the system won't truncate the time to a less precise unit, + // if this test fails on certain systems that may be the reason. + modTime, err := store.CurrentModified() + if err != nil { + t.Fatalf("unable to determine modification time of current config source, error: %v", err) + } + if !now.Equal(modTime) { + t.Errorf("expect %q but got %q", now.Format(time.RFC3339), modTime.Format(time.RFC3339)) + } +} + +func TestFsStoreCurrent(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ + ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + cases := []struct { + desc string + expect checkpoint.RemoteConfigSource + err string + }{ + {"default source", nil, ""}, + {"non-default source", source, ""}, + } + for _, c := range cases { + // save the last known good source + saveTestSourceFile(t, store.fs, curFile, c.expect) + + // load last-known-good and compare to expected result + source, err := store.Current() + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if !checkpoint.EqualRemoteConfigSources(c.expect, source) { + t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.expect), spew.Sdump(c.expect), spew.Sdump(source)) + } + } +} + +func TestFsStoreLastKnownGood(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ + ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + cases := []struct { + desc string + expect checkpoint.RemoteConfigSource + err string + }{ + {"default source", nil, ""}, + {"non-default source", source, ""}, + } + for _, c := range cases { + // save the last known good source + saveTestSourceFile(t, store.fs, lkgFile, c.expect) + + // load last-known-good and compare to expected result + source, err := store.LastKnownGood() + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if !checkpoint.EqualRemoteConfigSources(c.expect, source) { + t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.expect), spew.Sdump(c.expect), spew.Sdump(source)) + } + } +} + +func TestFsStoreSetCurrent(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + const uid = "uid" + expect := fmt.Sprintf(`{"kind":"NodeConfigSource","apiVersion":"v1","configMapRef":{"namespace":"namespace","name":"name","uid":"%s"}}%s`, uid, "\n") + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{ + Name: "name", Namespace: "namespace", UID: types.UID(uid)}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // save the current source + if err := store.SetCurrent(source); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // check that the 
source saved as we would expect + data := readTestSourceFile(t, store.fs, curFile) + if expect != string(data) { + t.Errorf("expect current source file to contain %q, but got %q", expect, string(data)) + } +} + +func TestFsStoreSetCurrentUpdated(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + cases := []struct { + current string + newCurrent string + expectUpdated bool + err string + }{ + {"", "", false, ""}, + {"uid", "", true, ""}, + {"", "uid", true, ""}, + {"uid", "uid", false, ""}, + {"uid", "other-uid", true, ""}, + {"other-uid", "uid", true, ""}, + {"other-uid", "other-uid", false, ""}, + } + + for _, c := range cases { + // construct current source + var source checkpoint.RemoteConfigSource + expectSource := "" + if len(c.current) > 0 { + expectSource = fmt.Sprintf(`{"kind":"NodeConfigSource","apiVersion":"v1","configMapRef":{"namespace":"namespace","name":"name","uid":"%s"}}%s`, c.current, "\n") + source, _, err = checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{ + Name: "name", Namespace: "namespace", UID: types.UID(c.current)}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + + // construct new source + var newSource checkpoint.RemoteConfigSource + expectNewSource := "" + if len(c.newCurrent) > 0 { + expectNewSource = fmt.Sprintf(`{"kind":"NodeConfigSource","apiVersion":"v1","configMapRef":{"namespace":"namespace","name":"new-name","uid":"%s"}}%s`, c.newCurrent, "\n") + newSource, _, err = checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{ + Name: "new-name", Namespace: "namespace", UID: types.UID(c.newCurrent)}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + + // set the initial current + if err := store.SetCurrent(source); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // update to the new current + updated, err := store.SetCurrentUpdated(newSource) + if utiltest.SkipRest(t, fmt.Sprintf("%q -> %q", c.current, c.newCurrent), err, c.err) { + continue + } + + // check that SetCurrentUpdated correctly reports whether the current checkpoint changed + if c.expectUpdated != updated { + t.Errorf("case %q -> %q, expect %v but got %v", c.current, c.newCurrent, c.expectUpdated, updated) + } + + // check that curFile is saved by SetCurrentUpdated as we expect + data := readTestSourceFile(t, store.fs, curFile) + if c.current == c.newCurrent { + // same UID should leave file unchanged + if expectSource != string(data) { + t.Errorf("case %q -> %q, expect current source file to contain %q, but got %q", c.current, c.newCurrent, expectSource, string(data)) + } + } else if expectNewSource != string(data) { + // otherwise expect the file to change + t.Errorf("case %q -> %q, expect current source file to contain %q, but got %q", c.current, c.newCurrent, expectNewSource, string(data)) + } + } + +} + +func TestFsStoreSetLastKnownGood(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + const uid = "uid" + expect := fmt.Sprintf(`{"kind":"NodeConfigSource","apiVersion":"v1","configMapRef":{"namespace":"namespace","name":"name","uid":"%s"}}%s`, uid, "\n") + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{ + Name: "name", Namespace: "namespace", UID: types.UID(uid)}}) + if err != nil { + t.Fatalf("unexpected error: 
%v", err) + } + + // save the last known good source + if err := store.SetLastKnownGood(source); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // check that the source saved as we would expect + data := readTestSourceFile(t, store.fs, lkgFile) + if expect != string(data) { + t.Errorf("expect last-known-good source file to contain %q, but got %q", expect, string(data)) + } +} + +func TestFsStoreReset(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + otherSource, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "other-name", Namespace: "namespace", UID: "other-uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + cases := []struct { + desc string + current checkpoint.RemoteConfigSource + lastKnownGood checkpoint.RemoteConfigSource + updated bool + }{ + {"nil -> nil", nil, nil, false}, + {"source -> nil", source, nil, true}, + {"nil -> source", nil, source, false}, + {"source -> source", source, source, true}, + {"source -> otherSource", source, otherSource, true}, + {"otherSource -> source", otherSource, source, true}, + } + for _, c := range cases { + // manually save the sources to their respective files + saveTestSourceFile(t, store.fs, curFile, c.current) + saveTestSourceFile(t, store.fs, lkgFile, c.lastKnownGood) + + // reset + updated, err := store.Reset() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // make sure the files were emptied + if size := testSourceFileSize(t, store.fs, curFile); size > 0 { + t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, curFile, size) + } + if size := testSourceFileSize(t, store.fs, lkgFile); size > 0 { + t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, lkgFile, size) + } + + // make sure Current() and LastKnownGood() both return nil + current, err := store.Current() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + lastKnownGood, err := store.LastKnownGood() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if current != nil || lastKnownGood != nil { + t.Errorf("case %q, expect nil for current and last-known-good checkpoints, but still have %q and %q, respectively", + c.desc, current, lastKnownGood) + } + if c.updated != updated { + t.Errorf("case %q, expect reset to return %t, but got %t", c.desc, c.updated, updated) + } + } +} + +func TestFsStoreSourceFromFile(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ + ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + cases := []struct { + desc string + expect checkpoint.RemoteConfigSource + err string + }{ + {"default source", nil, ""}, + {"non-default source", source, ""}, + } + + const name = "some-source-file" + for _, c := range cases { + saveTestSourceFile(t, store.fs, name, c.expect) + source, err := store.sourceFromFile(name) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if 
!checkpoint.EqualRemoteConfigSources(c.expect, source) { + t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.expect), spew.Sdump(c.expect), spew.Sdump(source)) + } + } +} + +func TestFsStoreSetSourceFile(t *testing.T) { + store, err := newInitializedFakeFsStore() + if err != nil { + t.Fatalf("failed to construct a store, error: %v", err) + } + + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + cases := []struct { + source checkpoint.RemoteConfigSource + }{ + {nil}, + {source}, + } + + const name = "some-source-file" + for _, c := range cases { + // set the source file + err := store.setSourceFile(name, c.source) + if err != nil { + t.Fatalf("unable to set source file, error: %v", err) + } + // read back the file + data := readTestSourceFile(t, store.fs, name) + str := string(data) + + if c.source != nil { + // expect the contents to match the encoding of the source + data, err := c.source.Encode() + expect := string(data) + if err != nil { + t.Fatalf("couldn't encode source, error: %v", err) + } + if expect != str { + t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.source), expect, str) + } + } else { + // expect empty file + expect := "" + if expect != str { + t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.source), expect, str) + } + } + } +} + +func readTestCheckpointFile(t *testing.T, fs utilfs.Filesystem, uid string) []byte { + data, err := fs.ReadFile(filepath.Join(testCheckpointsDir, uid)) + if err != nil { + t.Fatalf("unable to read test checkpoint file, error: %v", err) + } + return data +} + +func saveTestCheckpointFile(t *testing.T, fs utilfs.Filesystem, cpt checkpoint.Checkpoint) { + data, err := cpt.Encode() + if err != nil { + t.Fatalf("unable to encode test checkpoint, error: %v", err) + } + fmt.Println(cpt.UID()) + err = utilfiles.ReplaceFile(fs, filepath.Join(testCheckpointsDir, cpt.UID()), data) + if err != nil { + t.Fatalf("unable to save test checkpoint file, error: %v", err) + } +} + +func readTestSourceFile(t *testing.T, fs utilfs.Filesystem, relPath string) []byte { + data, err := fs.ReadFile(filepath.Join(testCheckpointsDir, relPath)) + if err != nil { + t.Fatalf("unable to read test source file, error: %v", err) + } + return data +} + +func saveTestSourceFile(t *testing.T, fs utilfs.Filesystem, relPath string, source checkpoint.RemoteConfigSource) { + if source != nil { + data, err := source.Encode() + if err != nil { + t.Fatalf("unable to save test source file, error: %v", err) + } + err = utilfiles.ReplaceFile(fs, filepath.Join(testCheckpointsDir, relPath), data) + if err != nil { + t.Fatalf("unable to save test source file, error: %v", err) + } + } else { + err := utilfiles.ReplaceFile(fs, filepath.Join(testCheckpointsDir, relPath), []byte{}) + if err != nil { + t.Fatalf("unable to save test source file, error: %v", err) + } + } +} + +func testSourceFileSize(t *testing.T, fs utilfs.Filesystem, relPath string) int64 { + info, err := fs.Stat(filepath.Join(testCheckpointsDir, relPath)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + return info.Size() +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/store.go b/pkg/kubelet/kubeletconfig/checkpoint/store/store.go new file mode 100644 index 0000000000..a6bdb736b2 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/store.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "fmt" + "time" + + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" +) + +// Store saves checkpoints and information about which is the current and last-known-good checkpoint to a storage layer +type Store interface { + // Initialize sets up the storage layer + Initialize() error + // Exists returns true if a checkpoint with `uid` exists in the store, false otherwise + Exists(uid string) (bool, error) + // Save saves the checkpoint to the storage layer + Save(c checkpoint.Checkpoint) error + // Load loads the checkpoint with UID `uid` from the storage layer, or returns an error if the checkpoint does not exist + Load(uid string) (checkpoint.Checkpoint, error) + // CurrentModified returns the last time that the current UID was set + CurrentModified() (time.Time, error) + // Current returns the source that points to the current checkpoint, or nil if no current checkpoint is set + Current() (checkpoint.RemoteConfigSource, error) + // LastKnownGood returns the source that points to the last-known-good checkpoint, or nil if no last-known-good checkpoint is set + LastKnownGood() (checkpoint.RemoteConfigSource, error) + // SetCurrent saves the source that points to the current checkpoint, set to nil to unset + SetCurrent(source checkpoint.RemoteConfigSource) error + // SetCurrentUpdated is similar to SetCurrent, but also returns whether the current checkpoint changed as a result + SetCurrentUpdated(source checkpoint.RemoteConfigSource) (bool, error) + // SetLastKnownGood saves the source that points to the last-known-good checkpoint, set to nil to unset + SetLastKnownGood(source checkpoint.RemoteConfigSource) error + // Reset unsets the current and last-known-good UIDs and returns whether the current UID was unset as a result of the reset + Reset() (bool, error) +} + +// reset is a helper for implementing Reset, which can be implemented in terms of Store methods +func reset(s Store) (bool, error) { + if err := s.SetLastKnownGood(nil); err != nil { + return false, fmt.Errorf("failed to reset last-known-good UID in checkpoint store, error: %v", err) + } + updated, err := s.SetCurrentUpdated(nil) + if err != nil { + return false, fmt.Errorf("failed to reset current UID in checkpoint store, error: %v", err) + } + return updated, nil +} + +// setCurrentUpdated is a helper for implementing SetCurrentUpdated, which can be implemented in terms of Store methods +func setCurrentUpdated(s Store, source checkpoint.RemoteConfigSource) (bool, error) { + cur, err := s.Current() + if err != nil { + return false, err + } + + // if both are nil, no need to update + if cur == nil && source == nil { + return false, nil + } + // if UIDs match, no need to update + if (source != nil && cur != nil) && cur.UID() == source.UID() { + return false, nil + } + // update the source + if err := s.SetCurrent(source); err != nil { + return false, err + } + return true, nil +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/store_test.go 
b/pkg/kubelet/kubeletconfig/checkpoint/store/store_test.go new file mode 100644 index 0000000000..c269d78589 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/store_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" +) + +func TestReset(t *testing.T) { + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + otherSource, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "other-name", Namespace: "namespace", UID: "other-uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + cases := []struct { + s *fakeStore + updated bool + }{ + {&fakeStore{current: nil, lastKnownGood: nil}, false}, + {&fakeStore{current: source, lastKnownGood: nil}, true}, + {&fakeStore{current: nil, lastKnownGood: source}, false}, + {&fakeStore{current: source, lastKnownGood: source}, true}, + {&fakeStore{current: source, lastKnownGood: otherSource}, true}, + {&fakeStore{current: otherSource, lastKnownGood: source}, true}, + } + for _, c := range cases { + updated, err := reset(c.s) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if c.s.current != nil || c.s.lastKnownGood != nil { + t.Errorf("case %q, expect nil for current and last-known-good checkpoints, but still have %q and %q, respectively", + spew.Sdump(c.s), c.s.current, c.s.lastKnownGood) + } + if c.updated != updated { + t.Errorf("case %q, expect reset to return %t, but got %t", spew.Sdump(c.s), c.updated, updated) + } + } +} + +func TestSetCurrentUpdated(t *testing.T) { + source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + otherSource, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "other-name", Namespace: "namespace", UID: "other-uid"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + cases := []struct { + s *fakeStore + newCurrent checkpoint.RemoteConfigSource + updated bool + }{ + {&fakeStore{current: nil}, nil, false}, + {&fakeStore{current: nil}, source, true}, + {&fakeStore{current: source}, source, false}, + {&fakeStore{current: source}, nil, true}, + {&fakeStore{current: source}, otherSource, true}, + } + for _, c := range cases { + current := c.s.current + updated, err := setCurrentUpdated(c.s, c.newCurrent) + if err != nil { + t.Fatalf("case %q -> %q, unexpected error: %v", current, c.newCurrent, err) + } + if c.newCurrent != c.s.current { + t.Errorf("case %q -> %q, expect current UID to be %q, but got %q", current, 
c.newCurrent, c.newCurrent, c.s.current) + } + if c.updated != updated { + t.Errorf("case %q -> %q, expect setCurrentUpdated to return %t, but got %t", current, c.newCurrent, c.updated, updated) + } + } +} diff --git a/pkg/kubelet/kubeletconfig/configfiles/BUILD b/pkg/kubelet/kubeletconfig/configfiles/BUILD new file mode 100644 index 0000000000..483f4976b5 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/configfiles/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["configfiles.go"], + tags = ["automanaged"], + deps = [ + "//pkg/apis/componentconfig:go_default_library", + "//pkg/kubelet/kubeletconfig/util/codec:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/configfiles/configfiles.go b/pkg/kubelet/kubeletconfig/configfiles/configfiles.go new file mode 100644 index 0000000000..fad56a80f0 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/configfiles/configfiles.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package configfiles + +import ( + "fmt" + "path/filepath" + + "k8s.io/kubernetes/pkg/apis/componentconfig" + utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" +) + +// Loader loads configuration from a storage layer +type Loader interface { + // Load loads and returns the KubeletConfiguration from the storage layer, or an error if a configuration could not be loaded + Load() (*componentconfig.KubeletConfiguration, error) +} + +// fsLoader loads configuration from `configDir` +type fsLoader struct { + // fs is the filesystem where the config files exist; can be mocked for testing + fs utilfs.Filesystem + // configDir is the absolute path to the directory containing the configuration files + configDir string +} + +// NewFSLoader returns a Loader that loads a KubeletConfiguration from the files in `configDir` +func NewFSLoader(fs utilfs.Filesystem, configDir string) Loader { + return &fsLoader{ + fs: fs, + configDir: configDir, + } +} + +func (loader *fsLoader) Load() (*componentconfig.KubeletConfiguration, error) { + errfmt := fmt.Sprintf("failed to load Kubelet config files from %q, error: ", loader.configDir) + "%v" + + // require the config be in a file called "kubelet" + path := filepath.Join(loader.configDir, "kubelet") + data, err := loader.fs.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read init config file %q, error: %v", path, err) + } + + // no configuration is an error, some parameters are required + if len(data) == 0 { + return nil, fmt.Errorf(errfmt, fmt.Errorf("config file was empty, but some parameters are required")) + } + + return utilcodec.DecodeKubeletConfiguration(data) +} diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go new file mode 100644 index 0000000000..6f2f74f28c --- /dev/null +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -0,0 +1,208 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
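For context, a minimal sketch of how the Loader above might be used (the directory path is an assumption for illustration; the only hard requirement from fsLoader.Load is that the directory contain a file literally named "kubelet" holding a serialized KubeletConfiguration):

// illustrative only, not part of this patch
fs := utilfs.DefaultFs{}
loader := configfiles.NewFSLoader(fs, "/etc/kubernetes/kubelet-init") // assumed path
kc, err := loader.Load() // returns *componentconfig.KubeletConfiguration
if err != nil {
    // an empty or missing "kubelet" file is an error, since some parameters are required
    return err
}
_ = kc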
+*/ + +package kubeletconfig + +import ( + "fmt" + "os" + + apiv1 "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +// pokeConfiSourceWorker tells the worker thread that syncs config sources that work needs to be done +func (cc *Controller) pokeConfigSourceWorker() { + select { + case cc.pendingConfigSource <- true: + default: + } +} + +// syncConfigSource checks if work needs to be done to use a new configuration, and does that work if necessary +func (cc *Controller) syncConfigSource(client clientset.Interface, nodeName string) { + select { + case <-cc.pendingConfigSource: + default: + // no work to be done, return + return + } + + // if the sync fails, we want to retry + var syncerr error + defer func() { + if syncerr != nil { + utillog.Errorf(syncerr.Error()) + cc.pokeConfigSourceWorker() + } + }() + + node, err := latestNode(cc.informer.GetStore(), nodeName) + if err != nil { + reason := "unable to read Node from internal object cache" + cc.configOK.SetFailedSyncCondition(reason) + syncerr = fmt.Errorf("%s, error: %v", reason, err) + return + } + + // check the Node and download any new config + if updated, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil { + cc.configOK.SetFailedSyncCondition(reason) + syncerr = fmt.Errorf("%s, error: %v", reason, err) + return + } else if updated { + // TODO(mtaufen): Consider adding a "currently restarting" node condition for this case + utillog.Infof("config updated, Kubelet will restart to begin using new config") + os.Exit(0) + } + + // If we get here: + // - there is no need to restart to update the current config + // - there was no error trying to sync configuration + // - if, previously, there was an error trying to sync configuration, we need to update to the correct condition + errfmt := `sync succeeded but unable to clear "failed to sync" message from ConfigOK, error: %v` + + currentUID := "" + if currentSource, err := cc.checkpointStore.Current(); err != nil { + utillog.Errorf(errfmt, err) + return + } else if currentSource != nil { + currentUID = currentSource.UID() + } + + lkgUID := "" + if lkgSource, err := cc.checkpointStore.LastKnownGood(); err != nil { + utillog.Errorf(errfmt, err) + return + } else if lkgSource != nil { + lkgUID = lkgSource.UID() + } + + currentBadReason := "" + if entry, err := cc.badConfigTracker.Entry(currentUID); err != nil { + utillog.Errorf(errfmt, err) + } else if entry != nil { + currentBadReason = entry.Reason + } + cc.configOK.ClearFailedSyncCondition(currentUID, lkgUID, currentBadReason, cc.initConfig != nil) +} + +// doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config, +// depending on the `source`, and returns whether the current config in the checkpoint store was updated as a result +func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *apiv1.NodeConfigSource) (bool, string, error) { + if source == nil { + utillog.Infof("Node.Spec.ConfigSource is empty, will reset current and last-known-good to defaults") + updated, reason, err := cc.resetConfig() + if err != nil { + return false, reason, err + } + return updated, "", nil + } + + // if the NodeConfigSource is non-nil, download the config + utillog.Infof("Node.Spec.ConfigSource is non-empty, will checkpoint source and update config if necessary") + remote, reason, err := 
checkpoint.NewRemoteConfigSource(source) + if err != nil { + return false, reason, err + } + reason, err = cc.checkpointConfigSource(client, remote) + if err != nil { + return false, reason, err + } + updated, reason, err := cc.setCurrentConfig(remote) + if err != nil { + return false, reason, err + } + return updated, "", nil +} + +// checkpointConfigSource downloads and checkpoints the object referred to by `source` if the checkpoint does not already exist, +// if a failure occurs, returns a sanitized failure reason and an error +func (cc *Controller) checkpointConfigSource(client clientset.Interface, source checkpoint.RemoteConfigSource) (string, error) { + uid := source.UID() + + // if the checkpoint already exists, skip downloading + if ok, err := cc.checkpointStore.Exists(uid); err != nil { + reason := fmt.Sprintf("unable to determine whether object with UID %q was already checkpointed", uid) + return reason, fmt.Errorf("%s, error: %v", reason, err) + } else if ok { + utillog.Infof("checkpoint already exists for object with UID %q, skipping download", uid) + return "", nil + } + + // download + checkpoint, reason, err := source.Download(client) + if err != nil { + return reason, fmt.Errorf("%s, error: %v", reason, err) + } + + // save + err = cc.checkpointStore.Save(checkpoint) + if err != nil { + reason := fmt.Sprintf("failed to save checkpoint for object with UID %q", checkpoint.UID()) + return reason, fmt.Errorf("%s, error: %v", reason, err) + } + + return "", nil +} + +// setCurrentConfig updates UID of the current checkpoint in the checkpoint store to `uid` and returns whether the +// current UID changed as a result, or a sanitized failure reason and an error. +func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bool, string, error) { + updated, err := cc.checkpointStore.SetCurrentUpdated(source) + if err != nil { + str := "default" + if source != nil { + str = fmt.Sprintf("object with UID %q", source.UID()) + } + return false, fmt.Sprintf("failed to set current checkpoint to %s", str), err + } + return updated, "", nil +} + +// resetConfig resets the current and last-known-good checkpoints in the checkpoint store to their default values and +// returns whether the current checkpoint changed as a result, or a sanitized failure reason and an error. 
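For reference, the remote source handled by this sync path comes from Node.Spec.ConfigSource; a hypothetical example of constructing one, mirroring the tests in this PR (name, namespace, and UID are placeholders, and the UID must match the UID of the live ConfigMap or the download is rejected):

// illustrative only, not part of this patch
src := &apiv1.NodeConfigSource{
    ConfigMapRef: &apiv1.ObjectReference{
        Name:      "my-kubelet-config", // placeholder
        Namespace: "kube-system",       // placeholder
        UID:       "uid-of-live-configmap",
    },
}
if remote, reason, err := checkpoint.NewRemoteConfigSource(src); err != nil {
    // reason is a sanitized message suitable for reporting in the ConfigOK condition
    utillog.Errorf("%s, error: %v", reason, err)
} else {
    _ = remote // would be handed to checkpointConfigSource / setCurrentConfig above
}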
+func (cc *Controller) resetConfig() (bool, string, error) { + updated, err := cc.checkpointStore.Reset() + if err != nil { + return false, "failed to reset to using local (default or init) config", err + } + return updated, "", nil +} + +// latestNode returns the most recent Node with `nodeName` from `store` +func latestNode(store cache.Store, nodeName string) (*apiv1.Node, error) { + obj, ok, err := store.GetByKey(nodeName) + if err != nil { + err := fmt.Errorf("failed to retrieve Node %q from informer's store, error: %v", nodeName, err) + utillog.Errorf(err.Error()) + return nil, err + } else if !ok { + err := fmt.Errorf("Node %q does not exist in the informer's store, can't sync config source", nodeName) + utillog.Errorf(err.Error()) + return nil, err + } + node, ok := obj.(*apiv1.Node) + if !ok { + err := fmt.Errorf("failed to cast object from informer's store to Node, can't sync config source for Node %q", nodeName) + utillog.Errorf(err.Error()) + return nil, err + } + return node, nil +} diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go new file mode 100644 index 0000000000..cc7c36a258 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -0,0 +1,350 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeletconfig + +import ( + "fmt" + "path/filepath" + "time" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/apis/componentconfig/validation" + "k8s.io/kubernetes/pkg/version" + + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/badconfig" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/startups" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" + utilpanic "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic" +) + +const ( + badConfigTrackingDir = "bad-config-tracking" + startupTrackingDir = "startup-tracking" + checkpointsDir = "checkpoints" + initConfigDir = "init" +) + +// Controller is the controller which, among other things: +// - loads configuration from disk +// - checkpoints configuration to disk +// - downloads new configuration from the API server +// - validates configuration +// - monitors for potential crash-loops caused by new configurations +// - tracks the last-known-good configuration, and rolls-back to last-known-good when necessary +// For more information, see the proposal: https://github.com/kubernetes/kubernetes/pull/29459 +type Controller struct { + // dynamicConfig, if true, indicates that we should sync config from the API server + dynamicConfig bool + + // defaultConfig is the configuration to use if no initConfig is provided + defaultConfig *componentconfig.KubeletConfiguration + + // initConfig is the unmarshaled init config, this will be loaded by the Controller if an initConfigDir is provided + initConfig *componentconfig.KubeletConfiguration + + // initLoader is for loading the Kubelet's init configuration files from disk + initLoader configfiles.Loader + + // pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server + pendingConfigSource chan bool + + // configOK manages the ConfigOK condition that is reported in Node.Status.Conditions + configOK status.ConfigOKCondition + + // informer is the informer that watches the Node object + informer cache.SharedInformer + + // checkpointStore persists config source checkpoints to a storage layer + checkpointStore store.Store + + // badConfigTracker persists bad-config records to a storage layer + badConfigTracker badconfig.Tracker + + // startupTracker persists Kubelet startup records, used for crash-loop detection, to a storage layer + startupTracker startups.Tracker +} + +// NewController constructs a new Controller object and returns it. Directory paths must be absolute. +// If the `initConfigDir` is an empty string, skips trying to load the init config. +// If the `dynamicConfigDir` is an empty string, skips trying to load checkpoints or download new config, +// but will still sync the ConfigOK condition if you call StartSync with a non-nil client. 
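To make the intended lifecycle concrete, a rough sketch of how the Kubelet might wire this controller up (the variable names and flag-derived directory paths are assumptions for illustration; only the Controller API itself comes from this patch):

// illustrative only, not part of this patch
ctrl := kubeletconfig.NewController(initConfigDir, dynamicConfigDir, defaultConfig)

// Bootstrap runs synchronously at startup and returns the KubeletConfiguration to run with,
// rolling back to last-known-good (or the local config) if the current checkpoint is bad
kc, err := ctrl.Bootstrap()
if err != nil {
    return err
}

// later, once an API client and node name are available; a nil client skips the sync loops
ctrl.StartSync(client, nodeName)
_ = kc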
+func NewController(initConfigDir string, dynamicConfigDir string, defaultConfig *componentconfig.KubeletConfiguration) *Controller { + fs := utilfs.DefaultFs{} + + var initLoader configfiles.Loader + if len(initConfigDir) > 0 { + initLoader = configfiles.NewFSLoader(fs, initConfigDir) + } + dynamicConfig := false + if len(dynamicConfigDir) > 0 { + dynamicConfig = true + } + + // Get the current kubelet version; bad-config and startup-tracking information can be kubelet-version specific, + // e.g. a bug that crash loops an old Kubelet under a given config might be fixed in a new Kubelet or vice-versa, + // validation might be relaxed in a new Kubelet, etc. + // We also don't want a change in a file format to break Kubelet upgrades; this makes sure a new kubelet gets + // a fresh dir to put its config health data in. + // Note that config checkpoints use the api machinery to store ConfigMaps, and thus get file format versioning for free. + kubeletVersion := version.Get().String() + + return &Controller{ + dynamicConfig: dynamicConfig, + defaultConfig: defaultConfig, + // channels must have capacity at least 1, since we signal with non-blocking writes + pendingConfigSource: make(chan bool, 1), + configOK: status.NewConfigOKCondition(), + checkpointStore: store.NewFsStore(fs, filepath.Join(dynamicConfigDir, checkpointsDir)), + badConfigTracker: badconfig.NewFsTracker(fs, filepath.Join(dynamicConfigDir, badConfigTrackingDir, kubeletVersion)), + startupTracker: startups.NewFsTracker(fs, filepath.Join(dynamicConfigDir, startupTrackingDir, kubeletVersion)), + initLoader: initLoader, + } +} + +// Bootstrap attempts to return a valid KubeletConfiguration based on the configuration of the Controller, +// or returns an error if no valid configuration could be produced. Bootstrap should be called synchronously before StartSync. +func (cc *Controller) Bootstrap() (*componentconfig.KubeletConfiguration, error) { + utillog.Infof("starting controller") + + // ALWAYS validate the local (default and init) configs. This makes incorrectly provisioned nodes an error. + // These must be valid because they are the foundational last-known-good configs. 
+ utillog.Infof("validating combination of defaults and flags") + if err := validation.ValidateKubeletConfiguration(cc.defaultConfig); err != nil { + return nil, fmt.Errorf("combination of defaults and flags failed validation, error: %v", err) + } + // only attempt to load and validate the init config if the user provided a path + if cc.initLoader != nil { + utillog.Infof("loading init config") + kc, err := cc.initLoader.Load() + if err != nil { + return nil, err + } + // validate the init config + utillog.Infof("validating init config") + if err := validation.ValidateKubeletConfiguration(kc); err != nil { + return nil, fmt.Errorf("failed to validate the init config, error: %v", err) + } + cc.initConfig = kc + } + // Assert: the default and init configs are both valid + + // if dynamic config is disabled, skip trying to load any checkpoints because they won't exist + if !cc.dynamicConfig { + return cc.localConfig(), nil + } + + // assert: now we know that a dynamicConfigDir was provided, and we can rely on that existing + + // make sure the filesystem is set up properly + // TODO(mtaufen): rename this to initializeDynamicConfigDir + if err := cc.initialize(); err != nil { + return nil, err + } + + // record the kubelet startup time, used for crashloop detection + if err := cc.startupTracker.RecordStartup(); err != nil { + return nil, err + } + + // determine UID of the current config source + curUID := "" + if curSource, err := cc.checkpointStore.Current(); err != nil { + return nil, err + } else if curSource != nil { + curUID = curSource.UID() + } + + // if curUID indicates the local config should be used, return the correct one of those + if len(curUID) == 0 { + return cc.localConfig(), nil + } // Assert: we will not use the local configurations, unless we roll back to lkg; curUID is non-empty + + // check whether the current config is marked bad + if entry, err := cc.badConfigTracker.Entry(curUID); err != nil { + return nil, err + } else if entry != nil { + utillog.Infof("current config %q was marked bad for reason %q at time %q", curUID, entry.Reason, entry.Time) + return cc.lkgRollback(entry.Reason) + } + + // TODO(mtaufen): consider re-verifying integrity and re-attempting download when a load/verify/parse/validate + // error happens outside trial period, we already made it past the trial so it's probably filesystem corruption + // or something else scary (unless someone is using a 0-length trial period) + + // load the current config + checkpoint, err := cc.checkpointStore.Load(curUID) + if err != nil { + // TODO(mtaufen): rollback and mark bad for now, but this could reasonably be handled by re-attempting a download, + // it probably indicates some sort of corruption + return cc.badRollback(curUID, fmt.Sprintf(status.CurFailLoadReasonFmt, curUID), fmt.Sprintf("error: %v", err)) + } + + // parse the checkpoint into a KubeletConfiguration + cur, err := checkpoint.Parse() + if err != nil { + return cc.badRollback(curUID, fmt.Sprintf(status.CurFailParseReasonFmt, curUID), fmt.Sprintf("error: %v", err)) + } + + // validate current config + if err := validation.ValidateKubeletConfiguration(cur); err != nil { + return cc.badRollback(curUID, fmt.Sprintf(status.CurFailValidateReasonFmt, curUID), fmt.Sprintf("error: %v", err)) + } + + // check for crash loops if we're still in the trial period + if trial, err := cc.inTrial(cur.ConfigTrialDuration.Duration); err != nil { + return nil, err + } else if trial { + if crashing, err := cc.crashLooping(cur.CrashLoopThreshold); err != nil { + return 
nil, err + } else if crashing { + return cc.badRollback(curUID, fmt.Sprintf(status.CurFailCrashLoopReasonFmt, curUID), "") + } + } else { + // when the trial period is over, the current config becomes the last-known-good + if err := cc.graduateCurrentToLastKnownGood(); err != nil { + return nil, err + } + } + + // update the status to note that we will use the current config + cc.configOK.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curUID), status.CurRemoteOKReason, apiv1.ConditionTrue) + return cur, nil +} + +// StartSync launches the controller's sync loops if `client` is non-nil and `nodeName` is non-empty. +// It will always start the Node condition reporting loop, and will also start the dynamic conifg sync loops +// if dynamic config is enabled on the controller. If `nodeName` is empty but `client` is non-nil, an error is logged. +func (cc *Controller) StartSync(client clientset.Interface, nodeName string) { + if client == nil { + utillog.Infof("nil client, will not start sync loops") + return + } else if len(nodeName) == 0 { + utillog.Errorf("cannot start sync loops with empty nodeName") + return + } + + // start the ConfigOK condition sync loop + go utilpanic.HandlePanic(func() { + utillog.Infof("starting ConfigOK condition sync loop") + wait.JitterUntil(func() { + cc.configOK.Sync(client, nodeName) + }, 10*time.Second, 0.2, true, wait.NeverStop) + })() + + // only sync to new, remotely provided configurations if dynamic config was enabled + if cc.dynamicConfig { + cc.informer = newSharedNodeInformer(client, nodeName, + cc.onAddNodeEvent, cc.onUpdateNodeEvent, cc.onDeleteNodeEvent) + // start the informer loop + // Rather than use utilruntime.HandleCrash, which doesn't actually crash in the Kubelet, + // we use HandlePanic to manually call the panic handlers and then crash. + // We have a better chance of recovering normal operation if we just restart the Kubelet in the event + // of a Go runtime error. 
+ go utilpanic.HandlePanic(func() { + utillog.Infof("starting Node informer sync loop") + cc.informer.Run(wait.NeverStop) + })() + + // start the config source sync loop + go utilpanic.HandlePanic(func() { + utillog.Infof("starting config source sync loop") + wait.JitterUntil(func() { + cc.syncConfigSource(client, nodeName) + }, 10*time.Second, 0.2, true, wait.NeverStop) + })() + } else { + utillog.Infof("dynamic config not enabled, will not sync to remote config") + } +} + +// initialize makes sure that the storage layers for various controller components are set up correctly +func (cc *Controller) initialize() error { + utillog.Infof("ensuring filesystem is set up correctly") + // initialize local checkpoint storage location + if err := cc.checkpointStore.Initialize(); err != nil { + return err + } + // initialize bad config tracker + if err := cc.badConfigTracker.Initialize(); err != nil { + return err + } + // initialize startup tracker + if err := cc.startupTracker.Initialize(); err != nil { + return err + } + return nil +} + +// localConfig returns the initConfig if it is loaded, otherwise returns the defaultConfig +func (cc *Controller) localConfig() *componentconfig.KubeletConfiguration { + if cc.initConfig != nil { + cc.configOK.Set(status.CurInitMessage, status.CurInitOKReason, apiv1.ConditionTrue) + return cc.initConfig + } + cc.configOK.Set(status.CurDefaultMessage, status.CurDefaultOKReason, apiv1.ConditionTrue) + return cc.defaultConfig +} + +// inTrial returns true if the time elapsed since the last modification of the current config exceeds `trialDur`, false otherwise +func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) { + now := time.Now() + t, err := cc.checkpointStore.CurrentModified() + if err != nil { + return false, err + } + if now.Sub(t) > trialDur { + return true, nil + } + return false, nil +} + +// crashLooping returns true if the number of startups since the last modification of the current config exceeds `threshold`, false otherwise +func (cc *Controller) crashLooping(threshold int32) (bool, error) { + // determine the last time the current config changed + modTime, err := cc.checkpointStore.CurrentModified() + if err != nil { + return false, err + } + // get the number of startups since that modification time + num, err := cc.startupTracker.StartupsSince(modTime) + if err != nil { + return false, err + } + return num > threshold, nil +} + +// graduateCurrentToLastKnownGood sets the last-known-good UID on the checkpointStore +// to the same value as the current UID maintained by the checkpointStore +func (cc *Controller) graduateCurrentToLastKnownGood() error { + curUID, err := cc.checkpointStore.Current() + if err != nil { + return fmt.Errorf("could not graduate last-known-good config to current config, error: %v", err) + } + err = cc.checkpointStore.SetLastKnownGood(curUID) + if err != nil { + return fmt.Errorf("could not graduate last-known-good config to current config, error: %v", err) + } + return nil +} diff --git a/pkg/kubelet/kubeletconfig/rollback.go b/pkg/kubelet/kubeletconfig/rollback.go new file mode 100644 index 0000000000..2769fa9c49 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/rollback.go @@ -0,0 +1,79 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeletconfig + +import ( + "fmt" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/apis/componentconfig/validation" + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +// badRollback makes an entry in the bad-config-tracking file for `uid` with `reason`, and returns the result of rolling back to the last-known-good config +func (cc *Controller) badRollback(uid, reason, detail string) (*componentconfig.KubeletConfiguration, error) { + utillog.Errorf(fmt.Sprintf("%s, %s", reason, detail)) + if err := cc.badConfigTracker.MarkBad(uid, reason); err != nil { + return nil, err + } + return cc.lkgRollback(reason) +} + +// lkgRollback returns a valid last-known-good configuration, and updates the `cc.configOK` condition +// regarding the `reason` for the rollback, or returns an error if a valid last-known-good could not be produced +func (cc *Controller) lkgRollback(reason string) (*componentconfig.KubeletConfiguration, error) { + utillog.Infof("rolling back to last-known-good config") + + lkgUID := "" + if lkgSource, err := cc.checkpointStore.LastKnownGood(); err != nil { + return nil, fmt.Errorf("unable to determine last-known-good config, error: %v", err) + } else if lkgSource != nil { + lkgUID = lkgSource.UID() + } + + // if lkgUID indicates the default should be used, return initConfig or defaultConfig + if len(lkgUID) == 0 { + if cc.initConfig != nil { + cc.configOK.Set(status.LkgInitMessage, reason, apiv1.ConditionFalse) + return cc.initConfig, nil + } + cc.configOK.Set(status.LkgDefaultMessage, reason, apiv1.ConditionFalse) + return cc.defaultConfig, nil + } + + // load + checkpoint, err := cc.checkpointStore.Load(lkgUID) + if err != nil { + return nil, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailLoadReasonFmt, lkgUID), err) + } + + // parse + lkg, err := checkpoint.Parse() + if err != nil { + return nil, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailParseReasonFmt, lkgUID), err) + } + + // validate + if err := validation.ValidateKubeletConfiguration(lkg); err != nil { + return nil, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailValidateReasonFmt, lkgUID), err) + } + + cc.configOK.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgUID), reason, apiv1.ConditionFalse) + return lkg, nil +} diff --git a/pkg/kubelet/kubeletconfig/startups/BUILD b/pkg/kubelet/kubeletconfig/startups/BUILD new file mode 100644 index 0000000000..fbec095a86 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/startups/BUILD @@ -0,0 +1,52 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "fstracker.go", + "startups.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/apis/componentconfig/validation:go_default_library", + "//pkg/kubelet/kubeletconfig/util/files:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + 
"//pkg/kubelet/kubeletconfig/util/log:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = [ + "fstracker_test.go", + "startups_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/kubelet/kubeletconfig/util/files:go_default_library", + "//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library", + "//pkg/kubelet/kubeletconfig/util/test:go_default_library", + ], +) diff --git a/pkg/kubelet/kubeletconfig/startups/fstracker.go b/pkg/kubelet/kubeletconfig/startups/fstracker.go new file mode 100644 index 0000000000..1c1e5ce8e3 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/startups/fstracker.go @@ -0,0 +1,127 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package startups + +import ( + "encoding/json" + "fmt" + "path/filepath" + "time" + + utilfiles "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +const ( + startupsFile = "startups.json" +) + +// fsTracker tracks startups in the local filesystem +type fsTracker struct { + // fs is the filesystem to use for storage operations; can be mocked for testing + fs utilfs.Filesystem + // trackingDir is the absolute path to the storage directory for fsTracker + trackingDir string +} + +// NewFsTracker returns a Tracker that will store information in the `trackingDir` +func NewFsTracker(fs utilfs.Filesystem, trackingDir string) Tracker { + return &fsTracker{ + fs: fs, + trackingDir: trackingDir, + } +} + +func (tracker *fsTracker) Initialize() error { + utillog.Infof("initializing startups tracking directory %q", tracker.trackingDir) + if err := utilfiles.EnsureDir(tracker.fs, tracker.trackingDir); err != nil { + return err + } + if err := utilfiles.EnsureFile(tracker.fs, filepath.Join(tracker.trackingDir, startupsFile)); err != nil { + return err + } + return nil +} + +func (tracker *fsTracker) RecordStartup() error { + // load the file + ls, err := tracker.load() + if err != nil { + return err + } + + ls = recordStartup(ls) + + // save the file + err = tracker.save(ls) + if err != nil { + return err + } + return nil +} + +func (tracker *fsTracker) StartupsSince(t time.Time) (int32, error) { + // load the startups-tracking file + ls, err := tracker.load() + if err != nil { + return 0, err + } + return startupsSince(ls, t) +} + +// TODO(mtaufen): refactor into encode/decode like in badconfig.go + +// load loads the startups-tracking file from disk +func (tracker *fsTracker) load() ([]string, error) { + path := filepath.Join(tracker.trackingDir, startupsFile) + + // load the file + b, err := tracker.fs.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to load 
startups-tracking file %q, error: %v", path, err) + } + + // parse json into the slice + ls := []string{} + + // if the file is empty, just return empty slice + if len(b) == 0 { + return ls, nil + } + + // otherwise unmarshal the json + if err := json.Unmarshal(b, &ls); err != nil { + return nil, fmt.Errorf("failed to unmarshal json from startups-tracking file %q, error: %v", path, err) + } + return ls, nil +} + +// save replaces the contents of the startups-tracking file with `ls` +func (tracker *fsTracker) save(ls []string) error { + // marshal the json + b, err := json.Marshal(ls) + if err != nil { + return err + } + // save the file + path := filepath.Join(tracker.trackingDir, startupsFile) + if err := utilfiles.ReplaceFile(tracker.fs, path, b); err != nil { + return err + } + return nil +} diff --git a/pkg/kubelet/kubeletconfig/startups/fstracker_test.go b/pkg/kubelet/kubeletconfig/startups/fstracker_test.go new file mode 100644 index 0000000000..0895c94598 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/startups/fstracker_test.go @@ -0,0 +1,294 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package startups + +import ( + "fmt" + "path/filepath" + "reflect" + "testing" + "time" + + utilfiles "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files" + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" + utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" +) + +const testTrackingDir = "/test-tracking-dir" + +// TODO(mtaufen): this file reuses a lot of test code from startups_test.go, should consolidate + +func newInitializedFakeFsTracker() (*fsTracker, error) { + fs := utilfs.NewFakeFs() + tracker := NewFsTracker(fs, testTrackingDir) + if err := tracker.Initialize(); err != nil { + return nil, err + } + return tracker.(*fsTracker), nil +} + +func TestFsTrackerInitialize(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("tracker.Initialize() failed with error: %v", err) + } + + // check that testTrackingDir exists + _, err = tracker.fs.Stat(testTrackingDir) + if err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", testTrackingDir, err) + } + + // check that testTrackingDir contains the startupsFile + path := filepath.Join(testTrackingDir, startupsFile) + _, err = tracker.fs.Stat(path) + if err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", path, err) + } +} + +func TestFsTrackerRecordStartup(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + now := time.Now() + + fullList := func() []string { + ls := []string{} + for i := maxStartups; i > 0; i-- { + // subtract decreasing amounts so timestamps increase but remain in the past + ls = append(ls, now.Add(-time.Duration(i)*time.Second).Format(time.RFC3339)) + } + return ls + }() + cases := []struct { + desc string + ls []string + expectHead []string // what we expect the first length-1 elements 
to look like after recording a new timestamp + expectLen int // how long the list should be after recording + }{ + // start empty + { + "start empty", + []string{}, + []string{}, + 1, + }, + // start non-empty + { + "start non-empty", + // subtract 1 so stamps are in the past + []string{now.Add(-1 * time.Second).Format(time.RFC3339)}, + []string{now.Add(-1 * time.Second).Format(time.RFC3339)}, + 2, + }, + // rotate list + { + "rotate list", + // make a slice with len == maxStartups, containing monotonically-increasing timestamps + fullList, + fullList[1:], + maxStartups, + }, + } + + for _, c := range cases { + // save the starting point, record a "startup" time, then load list from fs + if err := tracker.save(c.ls); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := tracker.RecordStartup(); err != nil { + t.Fatalf("unexpected error: %v", err) + } + ls, err := tracker.load() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if c.expectLen != len(ls) { + t.Errorf("case %q, expected list %q to have length %d", c.desc, ls, c.expectLen) + } + if !reflect.DeepEqual(c.expectHead, ls[:len(ls)-1]) { + t.Errorf("case %q, expected elements 0 through n-1 of list %q to equal %q", c.desc, ls, c.expectHead) + } + // timestamps should be monotonically increasing (assuming system clock isn't jumping around at least) + if sorted, err := timestampsSorted(ls); err != nil { + t.Fatalf("unexpected error: %v", err) + } else if !sorted { + t.Errorf("case %q, expected monotonically increasing timestamps, but got %q", c.desc, ls) + } + } +} + +func TestFsTrackerStartupsSince(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + now, err := time.Parse(time.RFC3339, "2017-01-02T15:04:05Z") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + cases := []struct { + desc string + ls []string + expect int32 + err string + }{ + // empty list + {"empty list", []string{}, 0, ""}, + // no startups since + { + "no startups since", + []string{"2014-01-02T15:04:05Z", "2015-01-02T15:04:05Z", "2016-01-02T15:04:05Z"}, + 0, + "", + }, + // 2 startups since + { + "some startups since", + []string{"2016-01-02T15:04:05Z", "2018-01-02T15:04:05Z", "2019-01-02T15:04:05Z"}, + 2, + "", + }, + // all startups since + { + "all startups since", + []string{"2018-01-02T15:04:05Z", "2019-01-02T15:04:05Z", "2020-01-02T15:04:05Z"}, + 3, + "", + }, + // invalid timestamp + {"invalid timestamp", []string{"2018-01-02T15:04:05Z08:00"}, 0, "failed to parse"}, + } + + for _, c := range cases { + if err := tracker.save(c.ls); err != nil { + t.Fatalf("unexpected error: %v", err) + } + num, err := tracker.StartupsSince(now) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if num != c.expect { + t.Errorf("case %q, expect %d startups but got %d", c.desc, c.expect, num) + } + } +} + +func TestFsTrackerLoad(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + nowstamp := time.Now().Format(time.RFC3339) + cases := []struct { + desc string + data []byte + expect []string + err string + }{ + // empty file + {"empty file", []byte(""), []string{}, ""}, + // empty list + {"empty list", []byte("[]"), []string{}, ""}, + // valid json + {"valid json", []byte(fmt.Sprintf(`["%s"]`, nowstamp)), []string{nowstamp}, ""}, + // invalid json + {"invalid json", []byte(`*`), []string{}, "failed to unmarshal"}, + } + + for _, c
:= range cases { + // save a file containing the correct serialization + utilfiles.ReplaceFile(tracker.fs, filepath.Join(testTrackingDir, startupsFile), c.data) + + // loading valid json should result in an object with the correct serialization + ls, err := tracker.load() + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if !reflect.DeepEqual(c.expect, ls) { + // ls should equal expected decoded object + t.Errorf("case %q, expect %#v but got %#v", c.desc, c.expect, ls) + } + } + +} + +func TestFsTrackerSave(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + nowstamp := time.Now().Format(time.RFC3339) + cases := []struct { + desc string + ls []string + expect string + err string + }{ + // empty list + {"empty list", []string{}, "[]", ""}, + // 1-entry list + {"valid json", []string{nowstamp}, fmt.Sprintf(`["%s"]`, nowstamp), ""}, + } + + for _, c := range cases { + if err := tracker.save(c.ls); utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + + data, err := tracker.fs.ReadFile(filepath.Join(testTrackingDir, startupsFile)) + if err != nil { + t.Fatalf("failed to read startups file, error: %v", err) + } + json := string(data) + + if json != c.expect { + t.Errorf("case %q, expect %q but got %q", c.desc, c.expect, json) + } + } +} + +func TestFsTrackerRoundTrip(t *testing.T) { + tracker, err := newInitializedFakeFsTracker() + if err != nil { + t.Fatalf("failed to construct a tracker, error: %v", err) + } + + nowstamp := time.Now().Format(time.RFC3339) + expect := []string{nowstamp} + + // test that saving and loading an object results in the same value + err = tracker.save(expect) + if err != nil { + t.Fatalf("failed to save startups data, error: %v", err) + } + after, err := tracker.load() + if err != nil { + t.Fatalf("failed to load startups data, error: %v", err) + } + if !reflect.DeepEqual(expect, after) { + t.Errorf("expect round-tripping %#v to result in the same value, but got %#v", expect, after) + } +} diff --git a/pkg/kubelet/kubeletconfig/startups/startups.go b/pkg/kubelet/kubeletconfig/startups/startups.go new file mode 100644 index 0000000000..10a69a1b70 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/startups/startups.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package startups + +import ( + "fmt" + "time" + + "k8s.io/kubernetes/pkg/apis/componentconfig/validation" +) + +const ( + // we allow one extra startup to account for the startup necessary to update configuration + maxStartups = validation.MaxCrashLoopThreshold + 1 +) + +// Tracker tracks Kubelet startups in a storage layer +type Tracker interface { + // Initialize sets up the storage layer + Initialize() error + // RecordStartup records the current time as a Kubelet startup + RecordStartup() error + // StartupsSince returns the number of Kubelet startups recorded since `t` + StartupsSince(t time.Time) (int32, error) +} + +func startupsSince(ls []string, start time.Time) (int32, error) { + // since the list is append-only we only need to count the number of timestamps since `t` + startups := int32(0) + for _, stamp := range ls { + t, err := time.Parse(time.RFC3339, stamp) + if err != nil { + return 0, fmt.Errorf("failed to parse timestamp while counting startups, error: %v", err) + } + if t.After(start) { + startups++ + } + } + return startups, nil +} + +func recordStartup(ls []string) []string { + // record current time + now := time.Now() + stamp := now.Format(time.RFC3339) // use RFC3339 time format + ls = append(ls, stamp) + + // rotate the slice if necessary + if len(ls) > maxStartups { + ls = ls[1:] + } + + // return the new slice + return ls +} diff --git a/pkg/kubelet/kubeletconfig/startups/startups_test.go b/pkg/kubelet/kubeletconfig/startups/startups_test.go new file mode 100644 index 0000000000..a0eafa68e3 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/startups/startups_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package startups + +import ( + "reflect" + "testing" + "time" + + utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" +) + +func TestRecordStartup(t *testing.T) { + now := time.Now() + + fullList := func() []string { + ls := []string{} + for i := maxStartups; i > 0; i-- { + // subtract decreasing amounts so timestamps increase but remain in the past + ls = append(ls, now.Add(-time.Duration(i)*time.Second).Format(time.RFC3339)) + } + return ls + }() + cases := []struct { + desc string + ls []string + expectHead []string // what we expect the first length-1 elements to look like after recording a new timestamp + expectLen int // how long the list should be after recording + }{ + // start empty + { + "start empty", + []string{}, + []string{}, + 1, + }, + // start non-empty + { + "start non-empty", + // subtract 1 so stamps are in the past + []string{now.Add(-1 * time.Second).Format(time.RFC3339)}, + []string{now.Add(-1 * time.Second).Format(time.RFC3339)}, + 2, + }, + // rotate list + { + "rotate list", + // make a slice with len == maxStartups, containing monotonically-increasing timestamps + fullList, + fullList[1:], + maxStartups, + }, + } + + for _, c := range cases { + ls := recordStartup(c.ls) + if c.expectLen != len(ls) { + t.Errorf("case %q, expected list %q to have length %d", c.desc, ls, c.expectLen) + } + if !reflect.DeepEqual(c.expectHead, ls[:len(ls)-1]) { + t.Errorf("case %q, expected elements 0 through n-1 of list %q to equal %q", c.desc, ls, c.expectHead) + } + // timestamps should be monotonically increasing (assuming system clock isn't jumping around at least) + if sorted, err := timestampsSorted(ls); err != nil { + t.Fatalf("unexpected error: %v", err) + } else if !sorted { + t.Errorf("case %q, expected monotonically increasing timestamps, but got %q", c.desc, ls) + } + } +} + +func TestStartupsSince(t *testing.T) { + now, err := time.Parse(time.RFC3339, "2017-01-02T15:04:05Z") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + cases := []struct { + desc string + ls []string + expect int32 + err string + }{ + // empty list + {"empty list", []string{}, 0, ""}, + // no startups since + { + "no startups since", + []string{"2014-01-02T15:04:05Z", "2015-01-02T15:04:05Z", "2016-01-02T15:04:05Z"}, + 0, + "", + }, + // 2 startups since + { + "some startups since", + []string{"2016-01-02T15:04:05Z", "2018-01-02T15:04:05Z", "2019-01-02T15:04:05Z"}, + 2, + "", + }, + // all startups since + { + "all startups since", + []string{"2018-01-02T15:04:05Z", "2019-01-02T15:04:05Z", "2020-01-02T15:04:05Z"}, + 3, + "", + }, + // invalid timestamp + {"invalid timestamp", []string{"2018-01-02T15:04:05Z08:00"}, 0, "failed to parse"}, + } + + for _, c := range cases { + num, err := startupsSince(c.ls, now) + if utiltest.SkipRest(t, c.desc, err, c.err) { + continue + } + if num != c.expect { + t.Errorf("case %q, expect %d startups but got %d", c.desc, c.expect, num) + } + } + +} + +// returns true if the timestamps are monotonically increasing, false otherwise +func timestampsSorted(ls []string) (bool, error) { + if len(ls) < 2 { + return true, nil + } + prev, err := time.Parse(time.RFC3339, ls[0]) + if err != nil { + return false, err + } + for _, stamp := range ls[1:] { + cur, err := time.Parse(time.RFC3339, stamp) + if err != nil { + return false, err + } + if !cur.After(prev) { + return false, nil + } + prev = cur + } + return true, nil +} diff --git a/pkg/kubelet/kubeletconfig/status/BUILD b/pkg/kubelet/kubeletconfig/status/BUILD new file mode 100644 index
0000000000..80e6d3daca --- /dev/null +++ b/pkg/kubelet/kubeletconfig/status/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["status.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/kubelet/kubeletconfig/util/equal:go_default_library", + "//pkg/kubelet/kubeletconfig/util/log:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/status/status.go b/pkg/kubelet/kubeletconfig/status/status.go new file mode 100644 index 0000000000..f1c351b3c5 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/status/status.go @@ -0,0 +1,306 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package status + +import ( + "fmt" + "strings" + "sync" + "time" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/api" + utilequal "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +const ( + configOKType = "ConfigOK" + + // CurDefaultMessage indicates the Kubelet is using its current config, which is the default + CurDefaultMessage = "using current (default)" + // LkgDefaultMessage indicates the Kubelet is using its last-known-good config, which is the default + LkgDefaultMessage = "using last-known-good (default)" + + // CurInitMessage indicates the Kubelet is using its current config, which is from the init config files + CurInitMessage = "using current (init)" + // LkgInitMessage indicates the Kubelet is using its last-known-good config, which is from the init config files + LkgInitMessage = "using last-known-good (init)" + + // CurRemoteMessageFmt indicates the Kubelet is using its current config, which is from an API source + CurRemoteMessageFmt = "using current (UID: %q)" + // LkgRemoteMessageFmt indicates the Kubelet is using its last-known-good config, which is from an API source + LkgRemoteMessageFmt = "using last-known-good (UID: %q)" + + // CurDefaultOKReason indicates that no init config files were provided + CurDefaultOKReason = "current is set to the local default, and no init config was provided" + // CurInitOKReason indicates that init config files were provided + CurInitOKReason = "current is set to the local default, and an init config was provided" + // CurRemoteOKReason indicates that the config from an API source passed all checks + CurRemoteOKReason = "passed all checks" + + // CurFailLoadReasonFmt indicates that the Kubelet failed to load the current config checkpoint for an API source + CurFailLoadReasonFmt = "failed to load current (UID: %q)" + // CurFailParseReasonFmt indicates that the Kubelet failed to parse the current config checkpoint for an API source + CurFailParseReasonFmt = "failed to parse current (UID: %q)" + // CurFailValidateReasonFmt indicates that the Kubelet failed to validate the current config checkpoint for an API source + CurFailValidateReasonFmt = "failed to validate current (UID: %q)" + // CurFailCrashLoopReasonFmt indicates that the Kubelet experienced a crash loop while using the current config checkpoint for an API source + CurFailCrashLoopReasonFmt = "current failed trial period due to crash loop (UID %q)" + + // LkgFailLoadReasonFmt indicates that the Kubelet failed to load the last-known-good config checkpoint for an API source + LkgFailLoadReasonFmt = "failed to load last-known-good (UID: %q)" + // LkgFailParseReasonFmt indicates that the Kubelet failed to parse the last-known-good config checkpoint for an API source + LkgFailParseReasonFmt = "failed to parse last-known-good (UID: %q)" + // LkgFailValidateReasonFmt indicates that the Kubelet failed to validate the last-known-good config checkpoint for an API source + LkgFailValidateReasonFmt = "failed to validate last-known-good (UID: %q)" + + emptyMessage = "unknown - message not provided" + emptyReason = "unknown - reason not provided" +) + +// ConfigOKCondition represents a ConfigOK NodeCondition +type ConfigOKCondition interface { + // Set sets the Message, Reason, and Status of the condition +
Set(message, reason string, status apiv1.ConditionStatus) + // SetFailedSyncCondition sets the condition for when syncing Kubelet config fails + SetFailedSyncCondition(reason string) + // ClearFailedSyncCondition resets ConfigOKCondition to the correct condition for successfully syncing the kubelet config + ClearFailedSyncCondition(current string, lastKnownGood string, currentBadReason string, initConfig bool) + // Sync patches the current condition into the Node identified by `nodeName` + Sync(client clientset.Interface, nodeName string) +} + +// configOKCondition implements ConfigOKCondition +type configOKCondition struct { + // conditionMux is a mutex on the condition, alternate between setting and syncing the condition + conditionMux sync.Mutex + // condition is the current ConfigOK node condition, which will be reported in the Node.status.conditions + condition *apiv1.NodeCondition + // pendingCondition; write to this channel to indicate that ConfigOK needs to be synced to the API server + pendingCondition chan bool +} + +// NewConfigOKCondition returns a new ConfigOKCondition +func NewConfigOKCondition() ConfigOKCondition { + return &configOKCondition{ + // channels must have capacity at least 1, since we signal with non-blocking writes + pendingCondition: make(chan bool, 1), + } +} + +// unsafeSet sets the current state of the condition +// it does not grab the conditionMux lock, so you should generally use setConfigOK unless you need to grab the lock +// at a higher level to synchronize additional operations +func (c *configOKCondition) unsafeSet(message, reason string, status apiv1.ConditionStatus) { + // We avoid an empty Message, Reason, or Status on the condition. Since we use Patch to update conditions, an empty + // field might cause a value from a previous condition to leak through, which can be very confusing. + if len(message) == 0 { + message = emptyMessage + } + if len(reason) == 0 { + reason = emptyReason + } + if len(string(status)) == 0 { + status = apiv1.ConditionUnknown + } + + c.condition = &apiv1.NodeCondition{ + Message: message, + Reason: reason, + Status: status, + Type: configOKType, + } + + c.pokeSyncWorker() +} + +func (c *configOKCondition) Set(message, reason string, status apiv1.ConditionStatus) { + c.conditionMux.Lock() + defer c.conditionMux.Unlock() + c.unsafeSet(message, reason, status) +} + +// SetFailedSyncCondition updates the ConfigOK status to reflect that we failed to sync to the latest config because we couldn't figure out what +// config to use (e.g. due to a malformed reference, a download failure, etc) +func (c *configOKCondition) SetFailedSyncCondition(reason string) { + c.Set(c.condition.Message, fmt.Sprintf("failed to sync, desired config unclear, reason: %s", reason), apiv1.ConditionUnknown) +} + +// ClearFailedSyncCondition resets ConfigOK to the correct condition for the config UIDs +// `current` and `lastKnownGood`, depending on whether current is bad (non-empty `currentBadReason`) +// and whether an init config exists (`initConfig` is true). 
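+// For example (illustrative of the branches below): if the previous reason recorded a failed sync and
+// currentBadReason is empty, the condition resets to (CurInitMessage, CurInitOKReason, ConditionTrue) when
+// current is empty and an init config exists, to (CurDefaultMessage, CurDefaultOKReason, ConditionTrue) when
+// neither exists, or to (CurRemoteMessageFmt with `current`, CurRemoteOKReason, ConditionTrue) otherwise;
+// a non-empty currentBadReason instead selects the corresponding last-known-good message with ConditionFalse.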
+func (c *configOKCondition) ClearFailedSyncCondition(current string, + lastKnownGood string, + currentBadReason string, + initConfig bool) { + // since our reason-check relies on c.condition we must manually take the lock and use c.unsafeSet instead of c.Set + c.conditionMux.Lock() + defer c.conditionMux.Unlock() + if strings.Contains(c.condition.Reason, "failed to sync, desired config unclear") { + // if we should report a "current is bad, rolled back" state + if len(currentBadReason) > 0 { + if len(current) == 0 { + if initConfig { + c.unsafeSet(LkgInitMessage, currentBadReason, apiv1.ConditionFalse) + return + } + c.unsafeSet(LkgDefaultMessage, currentBadReason, apiv1.ConditionFalse) + return + } + c.unsafeSet(fmt.Sprintf(LkgRemoteMessageFmt, lastKnownGood), currentBadReason, apiv1.ConditionFalse) + return + } + // if we should report a "current is ok" state + if len(current) == 0 { + if initConfig { + c.unsafeSet(CurInitMessage, CurInitOKReason, apiv1.ConditionTrue) + return + } + c.unsafeSet(CurDefaultMessage, CurDefaultOKReason, apiv1.ConditionTrue) + return + } + c.unsafeSet(fmt.Sprintf(CurRemoteMessageFmt, current), CurRemoteOKReason, apiv1.ConditionTrue) + } +} + +// pokeSyncWorker notes that the ConfigOK condition needs to be synced to the API server +func (c *configOKCondition) pokeSyncWorker() { + select { + case c.pendingCondition <- true: + default: + } +} + +// Sync attempts to sync `c.condition` with the Node object for this Kubelet, +// if syncing fails, an error is logged, and work is queued for retry. +func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) { + select { + case <-c.pendingCondition: + default: + // no work to be done, return + return + } + + // grab the lock + c.conditionMux.Lock() + defer c.conditionMux.Unlock() + + // if the sync fails, we want to retry + var err error + defer func() { + if err != nil { + utillog.Errorf(err.Error()) + c.pokeSyncWorker() + } + }() + + if c.condition == nil { + utillog.Infof("ConfigOK condition is nil, skipping ConfigOK sync") + return + } + + // get the Node so we can check the current condition + node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("could not get Node %q, will not sync ConfigOK condition, error: %v", nodeName, err) + return + } + + // set timestamps + syncTime := metav1.NewTime(time.Now()) + c.condition.LastHeartbeatTime = syncTime + if remote := getConfigOK(node.Status.Conditions); remote == nil || !utilequal.ConfigOKEq(remote, c.condition) { + // update transition time the first time we create the condition, + // or if we are semantically changing the condition + c.condition.LastTransitionTime = syncTime + } else { + // since the conditions are semantically equal, use lastTransitionTime from the condition currently on the Node + // we need to do this because the field will always be represented in the patch generated below, and this copy + // prevents nullifying the field during the patch operation + c.condition.LastTransitionTime = remote.LastTransitionTime + } + + // generate the patch + mediaType := "application/json" + info, ok := kuberuntime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) + if !ok { + err = fmt.Errorf("unsupported media type %q", mediaType) + return + } + versions := api.Registry.EnabledVersionsForGroup(api.GroupName) + if len(versions) == 0 { + err = fmt.Errorf("no enabled versions for group %q", api.GroupName) + return + } + // the "best" version supposedly comes first in the 
list returned from api.Registry.EnabledVersionsForGroup + encoder := api.Codecs.EncoderForVersion(info.Serializer, versions[0]) + + before, err := kuberuntime.Encode(encoder, node) + if err != nil { + err = fmt.Errorf(`failed to encode "before" node while generating patch, error: %v`, err) + return + } + + patchConfigOK(node, c.condition) + after, err := kuberuntime.Encode(encoder, node) + if err != nil { + err = fmt.Errorf(`failed to encode "after" node while generating patch, error: %v`, err) + return + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(before, after, apiv1.Node{}) + if err != nil { + err = fmt.Errorf("failed to generate patch for updating ConfigOK condition, error: %v", err) + return + } + + // patch the remote Node object + _, err = client.CoreV1().Nodes().PatchStatus(nodeName, patch) + if err != nil { + err = fmt.Errorf("could not update ConfigOK condition, error: %v", err) + return + } +} + +// patchConfigOK replaces or adds the ConfigOK condition to the node +func patchConfigOK(node *apiv1.Node, configOK *apiv1.NodeCondition) { + for i := range node.Status.Conditions { + if node.Status.Conditions[i].Type == configOKType { + // edit the condition + node.Status.Conditions[i] = *configOK + return + } + } + // append the condition + node.Status.Conditions = append(node.Status.Conditions, *configOK) +} + +// getConfigOK returns the first NodeCondition in `cs` with Type == configOKType, +// or if no such condition exists, returns nil. +func getConfigOK(cs []apiv1.NodeCondition) *apiv1.NodeCondition { + for i := range cs { + if cs[i].Type == configOKType { + return &cs[i] + } + } + return nil +} diff --git a/pkg/kubelet/kubeletconfig/util/codec/BUILD b/pkg/kubelet/kubeletconfig/util/codec/BUILD new file mode 100644 index 0000000000..44ef8e5039 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/codec/BUILD @@ -0,0 +1,35 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["codec.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/install:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/componentconfig/install:go_default_library", + "//pkg/apis/componentconfig/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/util/codec/codec.go b/pkg/kubelet/kubeletconfig/util/codec/codec.go new file mode 100644 index 0000000000..3046af89c0 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/codec/codec.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package codec + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" + // ensure the core apis are installed + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/apis/componentconfig" + // ensure the componentconfig api group is installed + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + ccv1a1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" +) + +// TODO(mtaufen): allow an encoder to be injected into checkpoint objects at creation time? (then we could ultimately instantiate only one encoder) + +// NewJSONEncoder generates a new runtime.Encoder that encodes objects to JSON +func NewJSONEncoder(groupName string) (runtime.Encoder, error) { + // encode to json + mediaType := "application/json" + info, ok := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) + if !ok { + return nil, fmt.Errorf("unsupported media type %q", mediaType) + } + + versions := api.Registry.EnabledVersionsForGroup(groupName) + if len(versions) == 0 { + return nil, fmt.Errorf("no enabled versions for group %q", groupName) + } + + // the "best" version supposedly comes first in the list returned from api.Registry.EnabledVersionsForGroup + return api.Codecs.EncoderForVersion(info.Serializer, versions[0]), nil +} + +// DecodeKubeletConfiguration decodes an encoded (v1alpha1) KubeletConfiguration object to the internal type +func DecodeKubeletConfiguration(data []byte) (*componentconfig.KubeletConfiguration, error) { + // TODO(mtaufen): when KubeletConfiguration moves out of componentconfig, will the UniversalDecoder still work? + // decode the object, note we use the external version scheme to decode, because users provide the external version + obj, err := runtime.Decode(api.Codecs.UniversalDecoder(ccv1a1.SchemeGroupVersion), data) + if err != nil { + return nil, fmt.Errorf("failed to decode, error: %v", err) + } + + externalKC, ok := obj.(*ccv1a1.KubeletConfiguration) + if !ok { + return nil, fmt.Errorf("failed to cast object to KubeletConfiguration, object: %#v", obj) + } + + // TODO(mtaufen): confirm whether api.Codecs.UniversalDecoder runs the defaulting, which would make this redundant + // run the defaulter on the decoded configuration before converting to internal type + api.Scheme.Default(externalKC) + + // convert to internal type + internalKC := &componentconfig.KubeletConfiguration{} + err = api.Scheme.Convert(externalKC, internalKC, nil) + if err != nil { + return nil, err + } + return internalKC, nil +} diff --git a/pkg/kubelet/kubeletconfig/util/equal/BUILD b/pkg/kubelet/kubeletconfig/util/equal/BUILD new file mode 100644 index 0000000000..a39b497715 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/equal/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["equal.go"], + tags = ["automanaged"], + deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/util/equal/equal.go b/pkg/kubelet/kubeletconfig/util/equal/equal.go new file mode 100644 index 0000000000..ffd12cf6a4 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/equal/equal.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equal + +import apiv1 "k8s.io/api/core/v1" + +// ConfigSourceEq returns true if the two config sources are semantically equivalent in the context of dynamic config +func ConfigSourceEq(a, b *apiv1.NodeConfigSource) bool { + if a == b { + return true + } else if a == nil || b == nil { + // not equal, and one is nil + return false + } + // check equality of config source subfields + if a.ConfigMapRef != b.ConfigMapRef { + return ObjectRefEq(a.ConfigMapRef, b.ConfigMapRef) + } + // all internal subfields of the config source are equal + return true +} + +// ObjectRefEq returns true if the two object references are semantically equivalent in the context of dynamic config +func ObjectRefEq(a, b *apiv1.ObjectReference) bool { + if a == b { + return true + } else if a == nil || b == nil { + // not equal, and one is nil + return false + } + return a.UID == b.UID && a.Namespace == b.Namespace && a.Name == b.Name +} + +// ConfigOKEq returns true if the two conditions are semantically equivalent in the context of dynamic config +func ConfigOKEq(a, b *apiv1.NodeCondition) bool { + return a.Message == b.Message && a.Reason == b.Reason && a.Status == b.Status +} diff --git a/pkg/kubelet/kubeletconfig/util/files/BUILD b/pkg/kubelet/kubeletconfig/util/files/BUILD new file mode 100644 index 0000000000..6d0978e464 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/files/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["files.go"], + tags = ["automanaged"], + deps = ["//pkg/kubelet/kubeletconfig/util/filesystem:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/util/files/files.go b/pkg/kubelet/kubeletconfig/util/files/files.go new file mode 100644 index 0000000000..99c7de851d --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/files/files.go @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package files + +import ( + "fmt" + "os" + "path/filepath" + + utilfs "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/filesystem" +) + +const defaultPerm = 0666 + +// FileExists returns true if a regular file exists at `path`, false if `path` does not exist, otherwise an error +func FileExists(fs utilfs.Filesystem, path string) (bool, error) { + if info, err := fs.Stat(path); err == nil { + if info.Mode().IsRegular() { + return true, nil + } + return false, fmt.Errorf("expected regular file at %q, but mode is %q", path, info.Mode().String()) + } else if os.IsNotExist(err) { + return false, nil + } else { + return false, err + } +} + +// EnsureFile ensures that a regular file exists at `path`; if it must create the file, any +// necessary parent directories will also be created and the new file will be empty. +func EnsureFile(fs utilfs.Filesystem, path string) error { + // if file exists, don't change it, but do report any unexpected errors + if ok, err := FileExists(fs, path); ok || err != nil { + return err + } // Assert: file does not exist + + // create any necessary parents + err := fs.MkdirAll(filepath.Dir(path), defaultPerm) + if err != nil { + return err + } + + // create the file + file, err := fs.Create(path) + if err != nil { + return err + } + // close the file, since we don't intend to use it yet + return file.Close() +} + +// ReplaceFile replaces the contents of the file at `path` with `data` by writing to a tmp file in the same +// dir as `path` and renaming the tmp file over `path`. The file does not have to exist to use ReplaceFile. +func ReplaceFile(fs utilfs.Filesystem, path string, data []byte) error { + dir := filepath.Dir(path) + prefix := filepath.Base(path) + + // create the tmp file + tmpFile, err := fs.TempFile(dir, prefix) + if err != nil { + return err + } + + // Name() will be an absolute path when using utilfs.DefaultFS, because ioutil.TempFile passes + // an absolute path to os.Open, and we ensure similar behavior in utilfs.FakeFS for testing. + tmpPath := tmpFile.Name() + + // write data + if _, err := tmpFile.Write(data); err != nil { + return err + } + if err := tmpFile.Close(); err != nil { + return err + } + + // rename over existing file + if err := fs.Rename(tmpPath, path); err != nil { + return err + } + return nil +} + +// DirExists returns true if a directory exists at `path`, false if `path` does not exist, otherwise an error +func DirExists(fs utilfs.Filesystem, path string) (bool, error) { + if info, err := fs.Stat(path); err == nil { + if info.IsDir() { + return true, nil + } + return false, fmt.Errorf("expected dir at %q, but mode is %q", path, info.Mode().String()) + } else if os.IsNotExist(err) { + return false, nil + } else { + return false, err + } +} + +// EnsureDir ensures that a directory exists at `path`; if it must create the directory, any +// necessary parent directories will also be created and the new directory will be empty.
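+// A rough usage sketch (the directory below is hypothetical; any utilfs.Filesystem works, e.g. DefaultFs
+// in production or the in-memory fake in tests):
+//   fs := utilfs.DefaultFs{}
+//   if err := EnsureDir(fs, "/var/lib/kubelet/store"); err != nil { /* handle error */ }
+//   if err := ReplaceFile(fs, "/var/lib/kubelet/store/startups.json", []byte("[]")); err != nil { /* handle error */ }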
+func EnsureDir(fs utilfs.Filesystem, path string) error { + // if dir exists, don't change it, but do report any unexpected errors + if ok, err := DirExists(fs, path); ok || err != nil { + return err + } // Assert: dir does not exist + + // create the dir + if err := fs.MkdirAll(path, defaultPerm); err != nil { + return err + } + return nil +} diff --git a/pkg/kubelet/kubeletconfig/util/filesystem/BUILD b/pkg/kubelet/kubeletconfig/util/filesystem/BUILD new file mode 100644 index 0000000000..c85fb2ff22 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/filesystem/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "defaultfs.go", + "fakefs.go", + "filesystem.go", + ], + tags = ["automanaged"], + deps = ["//vendor/github.com/spf13/afero:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/util/filesystem/defaultfs.go b/pkg/kubelet/kubeletconfig/util/filesystem/defaultfs.go new file mode 100644 index 0000000000..3840da0cde --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/filesystem/defaultfs.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filesystem + +import ( + "io/ioutil" + "os" + "time" +) + +// DefaultFs implements Filesystem using same-named functions from "os" and "io/ioutil" +type DefaultFs struct{} + +// Stat via os.Stat +func (DefaultFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +// Create via os.Create +func (DefaultFs) Create(name string) (File, error) { + file, err := os.Create(name) + if err != nil { + return nil, err + } + return &defaultFile{file}, nil +} + +// Rename via os.Rename +func (DefaultFs) Rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +// MkdirAll via os.MkdirAll +func (DefaultFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// Chtimes via os.Chtimes +func (DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +// ReadFile via ioutil.ReadFile +func (DefaultFs) ReadFile(filename string) ([]byte, error) { + return ioutil.ReadFile(filename) +} + +// TempFile via ioutil.TempFile +func (DefaultFs) TempFile(dir, prefix string) (File, error) { + file, err := ioutil.TempFile(dir, prefix) + if err != nil { + return nil, err + } + return &defaultFile{file}, nil +} + +// defaultFile implements File using same-named functions from "os" +type defaultFile struct { + file *os.File +} + +// Name via os.File.Name +func (file *defaultFile) Name() string { + return file.file.Name() +} + +// Write via os.File.Write +func (file *defaultFile) Write(b []byte) (n int, err error) { + return file.file.Write(b) +} + +// Close via os.File.Close +func (file *defaultFile) Close() error { + return file.file.Close() +} diff --git a/pkg/kubelet/kubeletconfig/util/filesystem/fakefs.go b/pkg/kubelet/kubeletconfig/util/filesystem/fakefs.go new file mode 100644 index 0000000000..7bbce8cc5a --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/filesystem/fakefs.go @@ -0,0 +1,97 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package filesystem + +import ( + "os" + "time" + + "github.com/spf13/afero" +) + +// fakeFs is implemented in terms of afero +type fakeFs struct { + a afero.Afero +} + +// NewFakeFs returns a fake Filesystem that exists in-memory, useful for unit tests +func NewFakeFs() Filesystem { + return &fakeFs{a: afero.Afero{Fs: afero.NewMemMapFs()}} +} + +// Stat via afero.Fs.Stat +func (fs *fakeFs) Stat(name string) (os.FileInfo, error) { + return fs.a.Fs.Stat(name) +} + +// Create via afero.Fs.Create +func (fs *fakeFs) Create(name string) (File, error) { + file, err := fs.a.Fs.Create(name) + if err != nil { + return nil, err + } + return &fakeFile{file}, nil +} + +// Rename via afero.Fs.Rename +func (fs *fakeFs) Rename(oldpath, newpath string) error { + return fs.a.Fs.Rename(oldpath, newpath) +} + +// MkdirAll via afero.Fs.MkdirAll +func (fs *fakeFs) MkdirAll(path string, perm os.FileMode) error { + return fs.a.Fs.MkdirAll(path, perm) +} + +// Chtimes via afero.Fs.Chtimes +func (fs *fakeFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return fs.a.Fs.Chtimes(name, atime, mtime) +} + +// ReadFile via afero.Fs.ReadFile +func (fs *fakeFs) ReadFile(filename string) ([]byte, error) { + return fs.a.ReadFile(filename) +} + +// TempFile via afero.Fs.TempFile +func (fs *fakeFs) TempFile(dir, prefix string) (File, error) { + file, err := fs.a.TempFile(dir, prefix) + if err != nil { + return nil, err + } + return &fakeFile{file}, nil +} + +// fakeFile implements File; for use with fakeFs +type fakeFile struct { + file afero.File +} + +// Name via afero.File.Name +func (file *fakeFile) Name() string { + return file.file.Name() +} + +// Write via afero.File.Write +func (file *fakeFile) Write(b []byte) (n int, err error) { + return file.file.Write(b) +} + +// Close via afero.File.Close +func (file *fakeFile) Close() error { + return file.file.Close() +} diff --git a/pkg/kubelet/kubeletconfig/util/filesystem/filesystem.go b/pkg/kubelet/kubeletconfig/util/filesystem/filesystem.go new file mode 100644 index 0000000000..2396681faf --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/filesystem/filesystem.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filesystem + +import ( + "os" + "time" +) + +// Filesystem is an interface that we can use to mock various filesystem operations +type Filesystem interface { + // from "os" + Stat(name string) (os.FileInfo, error) + Create(name string) (File, error) + Rename(oldpath, newpath string) error + MkdirAll(path string, perm os.FileMode) error + Chtimes(name string, atime time.Time, mtime time.Time) error + + // from "io/ioutil" + ReadFile(filename string) ([]byte, error) + TempFile(dir, prefix string) (File, error) +} + +// File is an interface that we can use to mock various filesystem operations typically +// accessed through the File object from the "os" package +type File interface { + // for now, the only os.File methods used are those below, add more as necessary + Name() string + Write(b []byte) (n int, err error) + Close() error +} diff --git a/pkg/kubelet/kubeletconfig/util/log/BUILD b/pkg/kubelet/kubeletconfig/util/log/BUILD new file mode 100644 index 0000000000..3003b84546 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/log/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["log.go"], + tags = ["automanaged"], + deps = ["//vendor/github.com/golang/glog:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/util/log/log.go b/pkg/kubelet/kubeletconfig/util/log/log.go new file mode 100644 index 0000000000..b4ecfe4dc9 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/log/log.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "fmt" + + "github.com/golang/glog" +) + +const logFmt = "kubelet config controller: %s" + +// Errorf shim that inserts "kubelet config controller" at the beginning of the log message, +// while still reporting the call site of the logging function. +func Errorf(format string, args ...interface{}) { + var s string + if len(args) > 0 { + s = fmt.Sprintf(format, args...) + } else { + s = format + } + glog.ErrorDepth(1, fmt.Sprintf(logFmt, s)) +} + +// Infof shim that inserts "kubelet config controller" at the beginning of the log message, +// while still reporting the call site of the logging function. +func Infof(format string, args ...interface{}) { + var s string + if len(args) > 0 { + s = fmt.Sprintf(format, args...) 
+ } else { + s = format + } + glog.InfoDepth(1, fmt.Sprintf(logFmt, s)) +} diff --git a/pkg/kubelet/kubeletconfig/util/panic/BUILD b/pkg/kubelet/kubeletconfig/util/panic/BUILD new file mode 100644 index 0000000000..c0089b4f34 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/panic/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["panic.go"], + tags = ["automanaged"], + deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/util/panic/panic.go b/pkg/kubelet/kubeletconfig/util/panic/panic.go new file mode 100644 index 0000000000..f1916ebf73 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/panic/panic.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package panic + +import utilruntime "k8s.io/apimachinery/pkg/util/runtime" + +// HandlePanic returns a function that wraps `fn` with the utilruntime.PanicHandlers, and continues +// to bubble the panic after the PanicHandlers are called +func HandlePanic(fn func()) func() { + return func() { + defer func() { + if r := recover(); r != nil { + for _, fn := range utilruntime.PanicHandlers { + fn(r) + } + panic(r) + } + }() + // call the function + fn() + } +} diff --git a/pkg/kubelet/kubeletconfig/util/test/BUILD b/pkg/kubelet/kubeletconfig/util/test/BUILD new file mode 100644 index 0000000000..64e10bbe76 --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/test/BUILD @@ -0,0 +1,27 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["test.go"], + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/kubeletconfig/util/test/test.go b/pkg/kubelet/kubeletconfig/util/test/test.go new file mode 100644 index 0000000000..1c18e8344e --- /dev/null +++ b/pkg/kubelet/kubeletconfig/util/test/test.go @@ -0,0 +1,40 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "strings" + "testing" +) + +// SkipRest returns true if there was a non-nil error or if we expected an error that didn't happen, +// and logs the appropriate error on the test object. +// The return value indicates whether we should skip the rest of the test case due to the error result. +func SkipRest(t *testing.T, desc string, err error, contains string) bool { + if err != nil { + if len(contains) == 0 { + t.Errorf("case %q, expect nil error but got %q", desc, err.Error()) + } else if !strings.Contains(err.Error(), contains) { + t.Errorf("case %q, expect error to contain %q but got %q", desc, contains, err.Error()) + } + return true + } else if len(contains) > 0 { + t.Errorf("case %q, expect error to contain %q but got nil error", desc, contains) + return true + } + return false +} diff --git a/pkg/kubelet/kubeletconfig/watch.go b/pkg/kubelet/kubeletconfig/watch.go new file mode 100644 index 0000000000..2c0363c7ca --- /dev/null +++ b/pkg/kubelet/kubeletconfig/watch.go @@ -0,0 +1,125 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeletconfig + +import ( + "math/rand" + "time" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + utilequal "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal" + utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" +) + +// newSharedNodeInformer returns a shared informer that uses `client` to watch the Node with +// `nodeName` for changes and respond with `addFunc`, `updateFunc`, and `deleteFunc`. 
+func newSharedNodeInformer(client clientset.Interface, nodeName string, + addFunc func(newObj interface{}), + updateFunc func(oldObj interface{}, newObj interface{}), + deleteFunc func(deletedObj interface{})) cache.SharedInformer { + // select nodes by name + fieldselector := fields.OneTermEqualSelector("metadata.name", nodeName) + + // add some randomness to resync period, which can help avoid controllers falling into lock-step + minResyncPeriod := 15 * time.Minute + factor := rand.Float64() + 1 + resyncPeriod := time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor) + + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (kuberuntime.Object, error) { + return client.Core().Nodes().List(metav1.ListOptions{ + FieldSelector: fieldselector.String(), + }) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().Nodes().Watch(metav1.ListOptions{ + FieldSelector: fieldselector.String(), + ResourceVersion: options.ResourceVersion, + }) + }, + } + + handler := cache.ResourceEventHandlerFuncs{ + AddFunc: addFunc, + UpdateFunc: updateFunc, + DeleteFunc: deleteFunc, + } + + informer := cache.NewSharedInformer(lw, &apiv1.Node{}, resyncPeriod) + informer.AddEventHandler(handler) + + return informer +} + +// onAddNodeEvent calls onUpdateNodeEvent with the new object and a nil old object +func (cc *Controller) onAddNodeEvent(newObj interface{}) { + cc.onUpdateNodeEvent(nil, newObj) +} + +// onUpdateNodeEvent checks whether the configSource changed between oldObj and newObj, and pokes the +// configuration sync worker if there was a change +func (cc *Controller) onUpdateNodeEvent(oldObj interface{}, newObj interface{}) { + newNode, ok := newObj.(*apiv1.Node) + if !ok { + utillog.Errorf("failed to cast new object to Node, couldn't handle event") + return + } + if oldObj == nil { + // Node was just added, need to sync + cc.pokeConfigSourceWorker() + return + } + oldNode, ok := oldObj.(*apiv1.Node) + if !ok { + utillog.Errorf("failed to cast old object to Node, couldn't handle event") + return + } + if !utilequal.ConfigSourceEq(oldNode.Spec.ConfigSource, newNode.Spec.ConfigSource) { + cc.pokeConfigSourceWorker() + } +} + +// onDeleteNodeEvent logs a message if the Node was deleted and may log errors +// if an unexpected DeletedFinalStateUnknown was received. +// We allow the sync-loop to continue, because it is possible that the Kubelet detected +// a Node with unexpected externalID and is attempting to delete and re-create the Node +// (see pkg/kubelet/kubelet_node_status.go), or that someone accidentally deleted the Node +// (the Kubelet will re-create it).
+func (cc *Controller) onDeleteNodeEvent(deletedObj interface{}) { + node, ok := deletedObj.(*apiv1.Node) + if !ok { + tombstone, ok := deletedObj.(cache.DeletedFinalStateUnknown) + if !ok { + utillog.Errorf("couldn't cast deleted object to DeletedFinalStateUnknown, object: %+v", deletedObj) + return + } + node, ok = tombstone.Obj.(*apiv1.Node) + if !ok { + utillog.Errorf("received DeletedFinalStateUnknown object but it did not contain a Node, object: %+v", deletedObj) + return + } + utillog.Infof("Node was deleted (DeletedFinalStateUnknown), sync-loop will continue because the Kubelet might recreate the Node, node: %+v", node) + return + } + utillog.Infof("Node was deleted, sync-loop will continue because the Kubelet might recreate the Node, node: %+v", node) +} diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go index 80de302f32..1ad3809b5a 100644 --- a/pkg/kubemark/hollow_kubelet.go +++ b/pkg/kubemark/hollow_kubelet.go @@ -110,6 +110,7 @@ func GetHollowKubeletConfig( // Flags struct f := &options.KubeletFlags{ + RootDirectory: testRootDir, HostnameOverride: nodeName, // Use the default runtime options. ContainerRuntimeOptions: *options.NewContainerRuntimeOptions(), @@ -123,7 +124,6 @@ func GetHollowKubeletConfig( c := &componentconfig.KubeletConfiguration{} api.Scheme.Convert(tmp, c, nil) - c.RootDirectory = testRootDir c.ManifestURL = "" c.Address = "0.0.0.0" /* bind address */ c.Port = int32(kubeletPort) diff --git a/plugin/pkg/admission/noderestriction/BUILD b/plugin/pkg/admission/noderestriction/BUILD index 4bf848152f..cf5d23774b 100644 --- a/plugin/pkg/admission/noderestriction/BUILD +++ b/plugin/pkg/admission/noderestriction/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/plugin/pkg/admission/noderestriction/admission.go b/plugin/pkg/admission/noderestriction/admission.go index 8fdcb4aea0..f1f77a9556 100644 --- a/plugin/pkg/admission/noderestriction/admission.go +++ b/plugin/pkg/admission/noderestriction/admission.go @@ -20,6 +20,7 @@ import ( "fmt" "io" + apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" @@ -242,18 +243,44 @@ func (c *nodePlugin) admitPodEviction(nodeName string, a admission.Attributes) e func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error { requestedName := a.GetName() - - // On create, get name from new object if unset in admission - if len(requestedName) == 0 && a.GetOperation() == admission.Create { + if a.GetOperation() == admission.Create { node, ok := a.GetObject().(*api.Node) if !ok { return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject())) } - requestedName = node.Name - } + // Don't allow a node to create its Node API object with the config source set. + // We scope node access to things listed in the Node.Spec, so allowing this would allow a view escalation. 
+ if node.Spec.ConfigSource != nil { + return admission.NewForbidden(a, fmt.Errorf("cannot create with non-nil configSource")) + } + + // On create, get name from new object if unset in admission + if len(requestedName) == 0 { + requestedName = node.Name + } + } if requestedName != nodeName { return admission.NewForbidden(a, fmt.Errorf("node %q cannot modify node %q", nodeName, requestedName)) } + + if a.GetOperation() == admission.Update { + node, ok := a.GetObject().(*api.Node) + if !ok { + return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject())) + } + oldNode, ok := a.GetOldObject().(*api.Node) + if !ok { + return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject())) + } + + // Don't allow a node to update the config source on its Node API object. + // We scope node access to things listed in the Node.Spec, so allowing this would allow a view escalation. + // We only do the check if the new node's configSource is non-nil; old kubelets might drop the field during a status update. + if node.Spec.ConfigSource != nil && !apiequality.Semantic.DeepEqual(node.Spec.ConfigSource, oldNode.Spec.ConfigSource) { + return admission.NewForbidden(a, fmt.Errorf("cannot update configSource to a new non-nil configSource")) + } + } + return nil } diff --git a/plugin/pkg/admission/noderestriction/admission_test.go b/plugin/pkg/admission/noderestriction/admission_test.go index 25f263cf97..7c109b2d3f 100644 --- a/plugin/pkg/admission/noderestriction/admission_test.go +++ b/plugin/pkg/admission/noderestriction/admission_test.go @@ -53,7 +53,12 @@ func Test_nodePlugin_Admit(t *testing.T) { mynode = &user.DefaultInfo{Name: "system:node:mynode", Groups: []string{"system:nodes"}} bob = &user.DefaultInfo{Name: "bob"} - mynodeObj = &api.Node{ObjectMeta: metav1.ObjectMeta{Name: "mynode"}} + mynodeObjMeta = metav1.ObjectMeta{Name: "mynode"} + mynodeObj = &api.Node{ObjectMeta: mynodeObjMeta} + mynodeObjConfigA = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{ConfigSource: &api.NodeConfigSource{ + ConfigMapRef: &api.ObjectReference{Name: "foo", Namespace: "bar", UID: "fooUID"}}}} + mynodeObjConfigB = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{ConfigSource: &api.NodeConfigSource{ + ConfigMapRef: &api.ObjectReference{Name: "qux", Namespace: "bar", UID: "quxUID"}}}} othernodeObj = &api.Node{ObjectMeta: metav1.ObjectMeta{Name: "othernode"}} mymirrorpod = makeTestPod("ns", "mymirrorpod", "mynode", true) @@ -586,6 +591,36 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(mynodeObj, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "status", admission.Update, mynode), err: "", }, + { + name: "forbid create of my node with non-nil configSource", + podsGetter: noExistingPods, + attributes: admission.NewAttributesRecord(mynodeObjConfigA, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Create, mynode), + err: "create with non-nil configSource", + }, + { + name: "forbid update of my node: nil configSource to new non-nil configSource", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mynodeObjConfigA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode), + err: "update configSource to a new non-nil configSource", + }, + { + name: "forbid update of my node: non-nil configSource to new non-nil configSource", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mynodeObjConfigB, 
mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode), + err: "update configSource to a new non-nil configSource", + }, + { + name: "allow update of my node: non-nil configSource unchanged", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mynodeObjConfigA, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode), + err: "", + }, + { + name: "allow update of my node: non-nil configSource to nil configSource", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode), + err: "", + }, // Other node object { diff --git a/staging/src/k8s.io/api/core/v1/register.go b/staging/src/k8s.io/api/core/v1/register.go index 73252bb14c..4916ee3c23 100644 --- a/staging/src/k8s.io/api/core/v1/register.go +++ b/staging/src/k8s.io/api/core/v1/register.go @@ -59,6 +59,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Endpoints{}, &EndpointsList{}, &Node{}, + &NodeConfigSource{}, &NodeList{}, &NodeProxyOptions{}, &Binding{}, diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index c2c574e344..e6b41dabbe 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -3332,6 +3332,18 @@ type NodeSpec struct { // If specified, the node's taints. // +optional Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` + // If specified, the source to get node configuration from + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // +optional + ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +type NodeConfigSource struct { + metav1.TypeMeta `json:",inline"` + ConfigMapRef *ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"` } // DaemonEndpoint contains information about a single Daemon endpoint. 
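[editor's note, illustrative only and not part of the patch] With the NodeConfigSource field added above, a caller points a Node at a ConfigMap by patching spec.configSource, which mirrors what the node e2e helper later in this patch does. A minimal sketch, assuming a client-go clientset, an existing ConfigMap holding a serialized KubeletConfiguration, and a hypothetical helper name setConfigSource:

package example

import (
	"encoding/json"
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// setConfigSource points nodeName's spec.configSource at cm via a strategic merge patch.
// Illustrative sketch; error handling and retries are omitted.
func setConfigSource(cs clientset.Interface, nodeName string, cm *v1.ConfigMap) error {
	src := &v1.NodeConfigSource{
		ConfigMapRef: &v1.ObjectReference{
			Namespace: cm.Namespace,
			Name:      cm.Name,
			UID:       cm.UID,
		},
	}
	raw, err := json.Marshal(src)
	if err != nil {
		return err
	}
	patch := []byte(fmt.Sprintf(`{"spec":{"configSource":%s}}`, raw))
	_, err = cs.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patch)
	return err
}

Note that the DynamicKubeletConfig feature gate must be enabled for the Kubelet to act on this field, and the NodeRestriction admission changes above prevent a node from setting or changing configSource on its own Node object.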
diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index b9a8fd02d5..7049c9a33b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -39,6 +39,9 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_unversioned_Time_To_unversioned_Time, + Convert_Pointer_v1_Duration_To_v1_Duration, + Convert_v1_Duration_To_Pointer_v1_Duration, + Convert_Slice_string_To_unversioned_Time, Convert_resource_Quantity_To_resource_Quantity, @@ -181,6 +184,21 @@ func Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s convers return nil } +func Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error { + if *in == nil { + *out = Duration{} // zero duration + return nil + } + *out = **in // copy + return nil +} + +func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error { + temp := *in //copy + *out = &temp + return nil +} + // Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { str := "" diff --git a/staging/src/k8s.io/sample-apiserver/pkg/registry/registry.go b/staging/src/k8s.io/sample-apiserver/pkg/registry/registry.go index 08eb057626..32da2eda90 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/registry/registry.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/registry/registry.go @@ -33,7 +33,7 @@ type REST struct { // a wrapper for wardle registries. func RESTInPeace(storage rest.StandardStorage, err error) rest.StandardStorage { if err != nil { - err = fmt.Errorf("Unable to create REST storage for a resource due to %v. Committing suicide.", err) + err = fmt.Errorf("unable to create REST storage for a resource due to %v, will die", err) panic(err) } return storage diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 519b3e30b3..18841759ef 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -54,6 +54,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/test/e2e_node/dynamic_kubelet_configuration_test.go b/test/e2e_node/dynamic_kubelet_configuration_test.go index 1b2bf6977f..b02273d270 100644 --- a/test/e2e_node/dynamic_kubelet_configuration_test.go +++ b/test/e2e_node/dynamic_kubelet_configuration_test.go @@ -30,7 +30,7 @@ import ( var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test") - Context("When a configmap called `kubelet-{node-name}` is added to the `kube-system` namespace", func() { + Context("When the config source on a Node is updated to point to new config", func() { It("The Kubelet on that node should restart to take up the new config", func() { // Get the current KubeletConfiguration (known to be valid) by // querying the configz endpoint for the current node. 
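[editor's note, illustrative only and not part of the patch] The Duration conversions added above have simple semantics: a nil *metav1.Duration converts to the zero Duration, and the reverse direction returns a pointer to a fresh copy rather than aliasing the input; these are presumably needed so a versioned config type can use *metav1.Duration for optional fields while the internal type keeps plain metav1.Duration. A small sketch, assuming this patch is applied; the conversion.Scope argument is unused by these functions, so nil is passed:

package example

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func durationConversionDemo() {
	// a nil pointer (e.g. an omitted optional field) converts to the zero Duration
	var in *metav1.Duration
	var out metav1.Duration
	_ = metav1.Convert_Pointer_v1_Duration_To_v1_Duration(&in, &out, nil)
	fmt.Println(out.Duration == 0) // true

	// the reverse conversion copies the value and returns a pointer to the copy
	d := metav1.Duration{Duration: 5 * time.Minute}
	var p *metav1.Duration
	_ = metav1.Convert_v1_Duration_To_Pointer_v1_Duration(&d, &p, nil)
	fmt.Println(*p == d, p != &d) // true true
}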
diff --git a/test/e2e_node/services/BUILD b/test/e2e_node/services/BUILD index 4b50ae8efd..88c183e157 100644 --- a/test/e2e_node/services/BUILD +++ b/test/e2e_node/services/BUILD @@ -25,6 +25,7 @@ go_library( "//cmd/kube-apiserver/app/options:go_default_library", "//pkg/api:go_default_library", "//pkg/controller/namespace:go_default_library", + "//pkg/features:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e_node/builder:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index e8e8795eb5..9cbc9d5ba1 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -28,6 +28,8 @@ import ( "github.com/golang/glog" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e_node/builder" ) @@ -93,6 +95,9 @@ const ( func (e *E2EServices) startKubelet() (*server, error) { glog.Info("Starting kubelet") + // set feature gates so we can check which features are enabled and pass the appropriate flags + utilfeature.DefaultFeatureGate.Set(framework.TestContext.FeatureGates) + // Build kubeconfig kubeconfigPath, err := createKubeconfigCWD() if err != nil { @@ -164,6 +169,16 @@ func (e *E2EServices) startKubelet() (*server, error) { "--eviction-minimum-reclaim", "nodefs.available=5%,nodefs.inodesFree=5%", // The minimum reclaimed resources after eviction. "--v", LOG_VERBOSITY_LEVEL, "--logtostderr", ) + + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + // Enable dynamic config if the feature gate is enabled + dynamicConfigDir, err := getDynamicConfigDir() + if err != nil { + return nil, err + } + cmdArgs = append(cmdArgs, "--dynamic-config-dir", dynamicConfigDir) + } + // Enable kubenet by default. cniBinDir, err := getCNIBinDirectory() if err != nil { @@ -294,6 +309,15 @@ func getCNIConfDirectory() (string, error) { return filepath.Join(cwd, "cni", "net.d"), nil } +// getDynamicConfigDir returns the directory for dynamic Kubelet configuration +func getDynamicConfigDir() (string, error) { + cwd, err := os.Getwd() + if err != nil { + return "", err + } + return filepath.Join(cwd, "dynamic-kubelet-config"), nil +} + // adjustArgsForSystemd escape special characters in kubelet arguments for systemd. Systemd // may try to do auto expansion without escaping. 
func adjustArgsForSystemd(args []string) { diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 1d5ac3683d..ea5a5b8b88 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -29,8 +29,9 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/pkg/api" @@ -128,20 +129,16 @@ func isKubeletConfigEnabled(f *framework.Framework) (bool, error) { return strings.Contains(cfgz.FeatureGates, "DynamicKubeletConfig=true"), nil } -// Queries the API server for a Kubelet configuration for the node described by framework.TestContext.NodeName -func getCurrentKubeletConfigMap(f *framework.Framework) (*v1.ConfigMap, error) { - return f.ClientSet.Core().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName), metav1.GetOptions{}) -} - // Creates or updates the configmap for KubeletConfiguration, waits for the Kubelet to restart -// with the new configuration. Returns an error if the configuration after waiting 40 seconds +// with the new configuration. Returns an error if the configuration after waiting for restartGap // doesn't match what you attempted to set, or if the dynamic configuration feature is disabled. func setKubeletConfiguration(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) error { const ( - restartGap = 30 * time.Second + restartGap = 40 * time.Second + pollInterval = 5 * time.Second ) - // Make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to reconfigure + // make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to reconfigure configEnabled, err := isKubeletConfigEnabled(f) if err != nil { return fmt.Errorf("could not determine whether 'DynamicKubeletConfig' feature is enabled, err: %v", err) @@ -152,37 +149,47 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *componentconfig.Ku "For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.") } - // Check whether a configmap for KubeletConfiguration already exists - _, err = getCurrentKubeletConfigMap(f) + nodeclient := f.ClientSet.CoreV1().Nodes() - if k8serr.IsNotFound(err) { - _, err := createConfigMap(f, kubeCfg) - if err != nil { - return err - } - } else if err != nil { - return err - } else { - // The configmap exists, update it instead of creating it. - _, err := updateConfigMap(f, kubeCfg) - if err != nil { - return err - } - } - - // Wait for the Kubelet to restart. 
- time.Sleep(restartGap) - - // Retrieve the new config and compare it to the one we attempted to set - newKubeCfg, err := getCurrentKubeletConfig() + // create the ConfigMap with the new configuration + cm, err := createConfigMap(f, kubeCfg) if err != nil { return err } - // Return an error if the desired config is not in use by now - if !reflect.DeepEqual(*kubeCfg, *newKubeCfg) { - return fmt.Errorf("either the Kubelet did not restart or it did not present the modified configuration via /configz after restarting.") + // create the correct reference object + src := &v1.NodeConfigSource{ + ConfigMapRef: &v1.ObjectReference{ + Namespace: "kube-system", + Name: cm.Name, + UID: cm.UID, + }, } + + // serialize the new node config source + raw, err := json.Marshal(src) + framework.ExpectNoError(err) + data := []byte(fmt.Sprintf(`{"spec":{"configSource":%s}}`, raw)) + + // patch the node + _, err = nodeclient.Patch(framework.TestContext.NodeName, + types.StrategicMergePatchType, + data) + framework.ExpectNoError(err) + + // poll for new config, for a maximum wait of restartGap + Eventually(func() error { + newKubeCfg, err := getCurrentKubeletConfig() + if err != nil { + return fmt.Errorf("failed trying to get current Kubelet config, will retry, error: %v", err) + } + if !reflect.DeepEqual(*kubeCfg, *newKubeCfg) { + return fmt.Errorf("still waiting for new configuration to take effect, will continue to watch /configz") + } + glog.Infof("new configuration has taken effect") + return nil + }, restartGap, pollInterval).Should(BeNil()) + return nil } @@ -239,28 +246,9 @@ func decodeConfigz(resp *http.Response) (*componentconfig.KubeletConfiguration, return &kubeCfg, nil } -// Constructs a Kubelet ConfigMap targeting the current node running the node e2e tests -func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfiguration) *v1.ConfigMap { - kubeCfgExt := v1alpha1.KubeletConfiguration{} - api.Scheme.Convert(kubeCfg, &kubeCfgExt, nil) - - bytes, err := json.Marshal(kubeCfgExt) - framework.ExpectNoError(err) - - cmap := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("kubelet-%s", nodeName), - }, - Data: map[string]string{ - "kubelet.config": string(bytes), - }, - } - return cmap -} - -// Uses KubeletConfiguration to create a `kubelet-` ConfigMap in the "kube-system" namespace. -func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*v1.ConfigMap, error) { - cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg) +// creates a configmap containing kubeCfg in kube-system namespace +func createConfigMap(f *framework.Framework, internalKC *componentconfig.KubeletConfiguration) (*v1.ConfigMap, error) { + cmap := makeKubeletConfigMap(internalKC) cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(cmap) if err != nil { return nil, err @@ -268,14 +256,25 @@ func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletCon return cmap, nil } -// Similar to createConfigMap, except this updates an existing ConfigMap. -func updateConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*v1.ConfigMap, error) { - cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg) - cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Update(cmap) - if err != nil { - return nil, err +// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Uses GenerateName. 
+func makeKubeletConfigMap(internalKC *componentconfig.KubeletConfiguration) *v1.ConfigMap { + externalKC := &v1alpha1.KubeletConfiguration{} + api.Scheme.Convert(internalKC, externalKC, nil) + + encoder, err := newJSONEncoder(componentconfig.GroupName) + framework.ExpectNoError(err) + + data, err := runtime.Encode(encoder, externalKC) + framework.ExpectNoError(err) + + cmap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{GenerateName: "testcfg"}, + Data: map[string]string{ + "kubelet": string(data), + }, } - return cmap, nil + + return cmap } func logPodEvents(f *framework.Framework) { @@ -309,3 +308,20 @@ func logKubeletMetrics(metricKeys ...string) { framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletMetrics(metric, metricSet)) } } + +func newJSONEncoder(groupName string) (runtime.Encoder, error) { + // encode to json + mediaType := "application/json" + info, ok := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) + if !ok { + return nil, fmt.Errorf("unsupported media type %q", mediaType) + } + + versions := api.Registry.EnabledVersionsForGroup(groupName) + if len(versions) == 0 { + return nil, fmt.Errorf("no enabled versions for group %q", groupName) + } + + // the "best" version supposedly comes first in the list returned from api.Registry.EnabledVersionsForGroup + return api.Codecs.EncoderForVersion(info.Serializer, versions[0]), nil +} diff --git a/test/integration/etcd/etcd_storage_path_test.go b/test/integration/etcd/etcd_storage_path_test.go index 8ccfa5bdc6..1ca4cd4f96 100644 --- a/test/integration/etcd/etcd_storage_path_test.go +++ b/test/integration/etcd/etcd_storage_path_test.go @@ -348,6 +348,7 @@ var ephemeralWhiteList = createEphemeralWhiteList( gvr("", "v1", "rangeallocations"), // stored in various places in etcd but cannot be directly created gvr("", "v1", "componentstatuses"), // status info not stored in etcd gvr("", "v1", "serializedreferences"), // used for serilization, not stored in etcd + gvr("", "v1", "nodeconfigsources"), // subfield of node.spec, but shouldn't be directly created gvr("", "v1", "podstatusresults"), // wrapper object not stored in etcd // --