k3s/pkg/cluster/managed.go

package cluster
// A managed database is one whose lifecycle we control - initializing the cluster, adding/removing members, taking snapshots, etc.
// This is currently just used for the embedded etcd datastore. Kine and other external etcd clusters are NOT considered managed.
import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/k3s-io/k3s/pkg/cluster/managed"
	"github.com/k3s-io/k3s/pkg/etcd"
	"github.com/k3s-io/k3s/pkg/nodepassword"
	"github.com/k3s-io/k3s/pkg/version"

	"github.com/sirupsen/logrus"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
)
// testClusterDB returns a channel that will be closed when the datastore connection is available.
// The datastore is tested for readiness every 5 seconds until the test succeeds.
func (c *Cluster) testClusterDB(ctx context.Context) (<-chan struct{}, error) {
	result := make(chan struct{})
	if c.managedDB == nil {
		close(result)
		return result, nil
	}

	go func() {
		defer close(result)
		for {
			if err := c.managedDB.Test(ctx); err != nil {
				logrus.Infof("Failed to test data store connection: %v", err)
			} else {
				logrus.Info(c.managedDB.EndpointName() + " data store connection OK")
				return
			}

			select {
			case <-time.After(5 * time.Second):
			case <-ctx.Done():
				return
			}
		}
	}()

	return result, nil
}
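
// Illustrative sketch (not part of the original file): a caller is expected to block on the
// returned channel before doing work that depends on the datastore. The surrounding wiring
// below is an assumption for illustration only.
//
//	ready, err := c.testClusterDB(ctx)
//	if err != nil {
//		return err
//	}
//	select {
//	case <-ready:
//		// datastore connection verified (or no managed DB is configured)
//	case <-ctx.Done():
//		return ctx.Err()
//	}
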
// start starts the database, unless a cluster reset has been requested, in which case
// it does that instead.
func (c *Cluster) start(ctx context.Context) error {
	if c.managedDB == nil {
		return nil
	}
	rebootstrap := func() error {
		return c.storageBootstrap(ctx)
	}

	resetDone, err := c.managedDB.IsReset()
	if err != nil {
		return err
	}

	if c.config.ClusterReset {
		// If we're restoring from a snapshot, don't check the reset-flag - just reset and restore.
		if c.config.ClusterResetRestorePath != "" {
			return c.managedDB.Reset(ctx, rebootstrap)
		}

		// If the reset-flag doesn't exist, reset. This will create the reset-flag if it succeeds.
		if !resetDone {
			return c.managedDB.Reset(ctx, rebootstrap)
		}

		// The reset-flag exists, ask the user to remove it if they want to reset again.
		return fmt.Errorf("Managed etcd cluster membership was previously reset, please remove the cluster-reset flag and start %s normally. "+
			"If you need to perform another cluster reset, you must first manually delete the file at %s", version.Program, c.managedDB.ResetFile())
	}

	if resetDone {
		// If the cluster was reset, we need to delete the node passwd secret in case the node
		// password from the previously restored snapshot differs from the current password on disk.
		c.config.Runtime.ClusterControllerStarts["node-password-secret-cleanup"] = c.deleteNodePasswdSecret
	}

	// Starting the managed database will clear the reset-flag if set
	return c.managedDB.Start(ctx, c.clientAccessInfo)
}
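
// Illustrative usage (assumption, not defined in this file): the reset handling above backs the
// cluster-reset CLI flow, roughly:
//
//	k3s server --cluster-reset
//	k3s server --cluster-reset --cluster-reset-restore-path=<snapshot>
//
// A second plain --cluster-reset run is refused until the file reported by
// c.managedDB.ResetFile() has been removed, while supplying a restore path bypasses that check.
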
// registerDBHandlers registers routes for database info with the http request handler
func (c *Cluster) registerDBHandlers(handler http.Handler) (http.Handler, error) {
	if c.managedDB == nil {
		return handler, nil
	}

	return c.managedDB.Register(handler)
}
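
// Note: Register is driver-specific. For the embedded etcd driver it is expected to wrap the
// handler with datastore routes (for example a member-info endpoint that joining servers query);
// the exact routes are defined by the driver, not here.
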
// assignManagedDriver assigns a driver based on a number of different configuration variables.
// If a driver has been initialized it is used.
// If no specific endpoint has been requested and creating or joining has been requested,
// we use the default driver.
// If none of the above are true, no managed driver is assigned.
func (c *Cluster) assignManagedDriver(ctx context.Context) error {
	// Check all managed drivers for an initialized database on disk; use one if found
	for _, driver := range managed.Registered() {
		if err := driver.SetControlConfig(c.config); err != nil {
			return err
		}
		if ok, err := driver.IsInitialized(); err != nil {
			return err
		} else if ok {
			c.managedDB = driver
			return nil
		}
	}

	// If we have been asked to initialize or join a cluster, do so using the default managed database.
	if c.config.Datastore.Endpoint == "" && (c.config.ClusterInit || (c.config.Token != "" && c.config.JoinURL != "")) {
		c.managedDB = managed.Default()
	}

	return nil
}
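
// For example: a server started with --cluster-init (or joining with a token and join URL) and
// no --datastore-endpoint falls through to managed.Default(), i.e. the embedded etcd driver.
// A server pointed at an external datastore leaves c.managedDB nil, so none of the managed
// lifecycle handling in this file applies to it.
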
// setupEtcdProxy starts a goroutine to periodically update the etcd proxy with the current list of
// cluster client URLs, as retrieved from etcd.
func (c *Cluster) setupEtcdProxy(ctx context.Context, etcdProxy etcd.Proxy) {
	if c.managedDB == nil {
		return
	}
	// We use Poll here instead of Until because we want to wait the interval before running the function.
	go wait.PollUntilWithContext(ctx, 30*time.Second, func(ctx context.Context) (bool, error) {
		clientURLs, err := c.managedDB.GetMembersClientURLs(ctx)
		if err != nil {
			logrus.Warnf("Failed to get etcd ClientURLs: %v", err)
			return false, nil
		}
		// client URLs are a full URI, but the proxy only wants host:port
		for i, c := range clientURLs {
			u, err := url.Parse(c)
			if err != nil {
				logrus.Warnf("Failed to parse etcd ClientURL: %v", err)
				return false, nil
			}
			clientURLs[i] = u.Host
		}
		etcdProxy.Update(clientURLs)
		return false, nil
	})
}
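
// For example, a member advertising "https://10.0.0.2:2379" is handed to the proxy as
// "10.0.0.2:2379"; the scheme and path are dropped because the proxy only needs host:port
// to dial the member.
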
// deleteNodePasswdSecret wipes out the node password secret after restoration
func (c *Cluster) deleteNodePasswdSecret(ctx context.Context) {
	nodeName := os.Getenv("NODE_NAME")
	secretsClient := c.config.Runtime.Core.Core().V1().Secret()
	if err := nodepassword.Delete(secretsClient, nodeName); err != nil {
		if apierrors.IsNotFound(err) {
			logrus.Debugf("Node password secret is not found for node %s", nodeName)
			return
		}
		logrus.Warnf("failed to delete old node password secret: %v", err)
	}
}
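
// Note (assumption about the nodepassword package, not defined here): the deleted secret is the
// per-node password secret in kube-system (conventionally named "<node>.node-password.<program>"),
// so the node can re-register with the password currently on disk after a snapshot restore.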