Move request handlers out of server package

The servers package, and router.go in particular, had become quite
large. Address this by moving some things out to separate packages:
* http request handlers all move to pkg/server/handlers.
* node password bootstrap auth handler goes into pkg/nodepassword with
  the other nodepassword code.

While we're at it, also be more consistent about naming: variables that
hold a config.Control struct or reference are now called `control` instead of `config` or `server`.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
(cherry picked from commit 2e4e7cf2c1)
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
pull/11561/head
Brad Davidson 2024-12-13 21:39:15 +00:00 committed by Brad Davidson
parent ebd9961896
commit 0d62e7308d
11 changed files with 827 additions and 718 deletions

View File

@ -15,6 +15,7 @@ import (
"github.com/k3s-io/k3s/pkg/proctitle"
"github.com/k3s-io/k3s/pkg/secretsencrypt"
"github.com/k3s-io/k3s/pkg/server"
"github.com/k3s-io/k3s/pkg/server/handlers"
"github.com/k3s-io/k3s/pkg/version"
"github.com/pkg/errors"
"github.com/urfave/cli"
@ -54,7 +55,7 @@ func Enable(app *cli.Context) error {
if err != nil {
return err
}
b, err := json.Marshal(server.EncryptionRequest{Enable: ptr.To(true)})
b, err := json.Marshal(handlers.EncryptionRequest{Enable: ptr.To(true)})
if err != nil {
return err
}
@ -73,7 +74,7 @@ func Disable(app *cli.Context) error {
if err != nil {
return err
}
b, err := json.Marshal(server.EncryptionRequest{Enable: ptr.To(false)})
b, err := json.Marshal(handlers.EncryptionRequest{Enable: ptr.To(false)})
if err != nil {
return err
}
@ -96,7 +97,7 @@ func Status(app *cli.Context) error {
if err != nil {
return wrapServerError(err)
}
status := server.EncryptionState{}
status := handlers.EncryptionState{}
if err := json.Unmarshal(data, &status); err != nil {
return err
}
@ -153,7 +154,7 @@ func Prepare(app *cli.Context) error {
if err != nil {
return err
}
b, err := json.Marshal(server.EncryptionRequest{
b, err := json.Marshal(handlers.EncryptionRequest{
Stage: ptr.To(secretsencrypt.EncryptionPrepare),
Force: cmds.ServerConfig.EncryptForce,
})
@ -175,7 +176,7 @@ func Rotate(app *cli.Context) error {
if err != nil {
return err
}
b, err := json.Marshal(server.EncryptionRequest{
b, err := json.Marshal(handlers.EncryptionRequest{
Stage: ptr.To(secretsencrypt.EncryptionRotate),
Force: cmds.ServerConfig.EncryptForce,
})
@ -197,7 +198,7 @@ func Reencrypt(app *cli.Context) error {
if err != nil {
return err
}
b, err := json.Marshal(server.EncryptionRequest{
b, err := json.Marshal(handlers.EncryptionRequest{
Stage: ptr.To(secretsencrypt.EncryptionReencryptActive),
Force: cmds.ServerConfig.EncryptForce,
Skip: cmds.ServerConfig.EncryptSkip,
@ -220,7 +221,7 @@ func RotateKeys(app *cli.Context) error {
if err != nil {
return err
}
b, err := json.Marshal(server.EncryptionRequest{
b, err := json.Marshal(handlers.EncryptionRequest{
Stage: ptr.To(secretsencrypt.EncryptionRotateKeys),
})
if err != nil {

View File

@ -16,6 +16,7 @@ import (
"github.com/k3s-io/k3s/pkg/kubeadm"
"github.com/k3s-io/k3s/pkg/proctitle"
"github.com/k3s-io/k3s/pkg/server"
"github.com/k3s-io/k3s/pkg/server/handlers"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/version"
"github.com/pkg/errors"
@ -153,7 +154,7 @@ func Rotate(app *cli.Context) error {
if err != nil {
return err
}
b, err := json.Marshal(server.TokenRotateRequest{
b, err := json.Marshal(handlers.TokenRotateRequest{
NewToken: ptr.To(cmds.TokenConfig.NewToken),
})
if err != nil {

View File

@ -391,11 +391,11 @@ func genClientCerts(config *config.Control) error {
}
}
if _, err = factory(user.KubeProxy, nil, runtime.ClientKubeProxyCert, runtime.ClientKubeProxyKey); err != nil {
if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ClientKubeProxyKey, regen); err != nil {
return err
}
// This user (system:k3s-controller by default) must be bound to a role in rolebindings.yaml or the downstream equivalent
if _, err = factory("system:"+version.Program+"-controller", nil, runtime.ClientK3sControllerCert, runtime.ClientK3sControllerKey); err != nil {
if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ClientK3sControllerKey, regen); err != nil {
return err
}

View File

@ -0,0 +1,233 @@
package nodepassword
import (
"context"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/gorilla/mux"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/util"
"github.com/pkg/errors"
coreclient "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
)
// identifier is used to resolve node identity from client certificate auth info.
var identifier = nodeidentifier.NewDefaultNodeIdentifier()

// NodeAuthValidator returns a node name, or http error code and error
type NodeAuthValidator func(req *http.Request) (string, int, error)

// nodeInfo contains information on the requesting node, derived from auth creds
// and request headers.
type nodeInfo struct {
	Name     string    // lowercased node name from the <program>-Node-Name header
	Password string    // node password from the <program>-Node-Password header
	User     user.Info // authenticated user from the request context
}
// GetNodeAuthValidator returns a function that will be called to validate node password authentication.
// Node password authentication is used when requesting kubelet certificates, and verifies that the
// credentials are valid for the requested node name, and that the node password is valid if it exists.
// These checks prevent a user with access to one agent from requesting kubelet certificates that
// could be used to impersonate another cluster member.
func GetNodeAuthValidator(ctx context.Context, control *config.Control) NodeAuthValidator {
	runtime := control.Runtime
	// deferredNodes tracks nodes whose password validation has been deferred;
	// access is guarded by mu, as the returned validator may be called concurrently.
	deferredNodes := map[string]bool{}
	var secretClient coreclient.SecretController
	var nodeClient coreclient.NodeController
	var mu sync.Mutex

	return func(req *http.Request) (string, int, error) {
		node, err := getNodeInfo(req)
		if err != nil {
			return "", http.StatusBadRequest, err
		}

		// node identity auth uses an existing kubelet client cert instead of auth token.
		// If used, validate that the node identity matches the requested node name.
		nodeName, isNodeAuth := identifier.NodeIdentity(node.User)
		if isNodeAuth && nodeName != node.Name {
			return "", http.StatusBadRequest, errors.New("header node name does not match auth node name")
		}

		// get client address, to see if deferred node password validation should be allowed when the apiserver
		// is not available. Deferred password validation is only allowed for requests from the local client.
		client, _, _ := net.SplitHostPort(req.RemoteAddr)
		isLocal := client == "127.0.0.1" || client == "::1"

		if secretClient == nil || nodeClient == nil {
			if runtime.Core != nil {
				// initialize the client if we can
				secretClient = runtime.Core.Core().V1().Secret()
				nodeClient = runtime.Core.Core().V1().Node()
			} else if isLocal && node.Name == os.Getenv("NODE_NAME") {
				// If we're verifying our own password, verify it locally and ensure a secret later.
				return verifyLocalPassword(ctx, control, &mu, deferredNodes, node)
			} else if isLocal && control.DisableAPIServer && !isNodeAuth {
				// If we're running on an etcd-only node, and the request didn't use Node Identity auth,
				// defer node password verification until an apiserver joins the cluster.
				return verifyRemotePassword(ctx, control, &mu, deferredNodes, node)
			} else {
				// Otherwise, reject the request until the core is ready.
				return "", http.StatusServiceUnavailable, util.ErrCoreNotReady
			}
		}

		// verify that the node exists, if using Node Identity auth
		if err := verifyNode(ctx, nodeClient, node); err != nil {
			return "", http.StatusUnauthorized, err
		}

		// verify that the node password secret matches, or create it if it does not
		if err := Ensure(secretClient, node.Name, node.Password); err != nil {
			// if the verification failed, reject the request
			if errors.Is(err, ErrVerifyFailed) {
				return "", http.StatusForbidden, err
			}
			// If verification failed due to an error creating the node password secret, allow
			// the request, but retry verification until the outage is resolved. This behavior
			// allows nodes to join the cluster during outages caused by validating webhooks
			// blocking secret creation - if the outage requires new nodes to join in order to
			// run the webhook pods, we must fail open here to resolve the outage.
			return verifyRemotePassword(ctx, control, &mu, deferredNodes, node)
		}

		return node.Name, http.StatusOK, nil
	}
}
// getNodeInfo extracts the node name, node password, and authenticated user
// from the request headers and context. An error is returned if any of these
// critical fields are missing.
func getNodeInfo(req *http.Request) (*nodeInfo, error) {
	u, ok := request.UserFrom(req.Context())
	if !ok {
		return nil, errors.New("auth user not set")
	}

	program := mux.Vars(req)["program"]
	name := req.Header.Get(program + "-Node-Name")
	if name == "" {
		return nil, errors.New("node name not set")
	}

	password := req.Header.Get(program + "-Node-Password")
	if password == "" {
		return nil, errors.New("node password not set")
	}

	info := nodeInfo{
		Name:     strings.ToLower(name),
		Password: password,
		User:     u,
	}
	return &info, nil
}
// verifyLocalPassword validates the node's password directly against the local
// node password file, for use when the apiserver is unavailable. This is only
// used early in startup, when a control-plane node's agent is starting up
// without a functional apiserver.
func verifyLocalPassword(ctx context.Context, control *config.Control, mu *sync.Mutex, deferredNodes map[string]bool, node *nodeInfo) (string, int, error) {
	// do not attempt to verify the node password if the local host is not running an agent and does not have a node resource.
	// note that the agent certs and kubeconfigs are created even if the agent is disabled; the only thing that is skipped is starting the kubelet and container runtime.
	if control.DisableAgent {
		return node.Name, http.StatusOK, nil
	}

	// use same password file location that the agent creates
	passwordRoot := "/"
	if control.Rootless {
		passwordRoot = filepath.Join(path.Dir(control.DataDir), "agent")
	}
	passwordFile := filepath.Join(passwordRoot, "etc", "rancher", "node", "password")

	passBytes, err := os.ReadFile(passwordFile)
	if err != nil {
		return "", http.StatusInternalServerError, errors.Wrap(err, "unable to read node password file")
	}

	passHash, err := Hasher.CreateHash(strings.TrimSpace(string(passBytes)))
	if err != nil {
		return "", http.StatusInternalServerError, errors.Wrap(err, "unable to hash node password file")
	}

	if err := Hasher.VerifyHash(passHash, node.Password); err != nil {
		return "", http.StatusForbidden, errors.Wrap(err, "unable to verify local node password")
	}

	mu.Lock()
	defer mu.Unlock()

	// kick off a one-time deferred secret validation for this node
	if !deferredNodes[node.Name] {
		deferredNodes[node.Name] = true
		go ensureSecret(ctx, control, node)
		logrus.Infof("Password verified locally for node %s", node.Name)
	}

	return node.Name, http.StatusOK, nil
}
// verifyRemotePassword defers node password validation when the server does
// not have a local apiserver, as in the case of etcd-only nodes. The node
// password is ensured once an apiserver joins the cluster.
func verifyRemotePassword(ctx context.Context, control *config.Control, mu *sync.Mutex, deferredNodes map[string]bool, node *nodeInfo) (string, int, error) {
	mu.Lock()
	defer mu.Unlock()

	// only start one deferred validation goroutine per node
	if !deferredNodes[node.Name] {
		deferredNodes[node.Name] = true
		go ensureSecret(ctx, control, node)
		logrus.Infof("Password verification deferred for node %s", node.Name)
	}

	return node.Name, http.StatusOK, nil
}
// verifyNode confirms that a node with the given name exists, to prevent auth
// from succeeding with a client certificate for a node that has been deleted
// from the cluster. The check only applies to Node Identity auth.
func verifyNode(ctx context.Context, nodeClient coreclient.NodeController, node *nodeInfo) error {
	nodeName, isNodeAuth := identifier.NodeIdentity(node.User)
	if !isNodeAuth {
		return nil
	}
	if _, err := nodeClient.Cache().Get(nodeName); err != nil {
		return errors.Wrap(err, "unable to verify node identity")
	}
	return nil
}
// ensureSecret validates a server's node password secret once the apiserver is
// up. As the node has already joined the cluster at this point, this is purely
// informational; the outcome is reported as an event on the node.
func ensureSecret(ctx context.Context, control *config.Control, node *nodeInfo) {
	runtime := control.Runtime
	_ = wait.PollUntilContextCancel(ctx, time.Second*5, true, func(ctx context.Context) (bool, error) {
		if runtime.Core == nil {
			// apiserver is not up yet; keep polling
			return false, nil
		}
		secretClient := runtime.Core.Core().V1().Secret()
		// This is consistent with events attached to the node generated by the kubelet
		// https://github.com/kubernetes/kubernetes/blob/612130dd2f4188db839ea5c2dea07a96b0ad8d1c/pkg/kubelet/kubelet.go#L479-L485
		nodeRef := &corev1.ObjectReference{
			Kind:      "Node",
			Name:      node.Name,
			UID:       types.UID(node.Name),
			Namespace: "",
		}
		if err := Ensure(secretClient, node.Name, node.Password); err != nil {
			runtime.Event.Eventf(nodeRef, corev1.EventTypeWarning, "NodePasswordValidationFailed", "Deferred node password secret validation failed: %v", err)
			// Return true to stop polling if the password verification failed; only retry on secret creation errors.
			return errors.Is(err, ErrVerifyFailed), nil
		}
		runtime.Event.Event(nodeRef, corev1.EventTypeNormal, "NodePasswordValidationComplete", "Deferred node password secret validation complete")
		return true, nil
	})
}

View File

@ -1,4 +1,4 @@
package server
package handlers
import (
"bytes"
@ -28,14 +28,14 @@ import (
"k8s.io/client-go/util/keyutil"
)
func caCertReplaceHandler(server *config.Control) http.HandlerFunc {
func CACertReplace(control *config.Control) http.HandlerFunc {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodPut {
util.SendError(fmt.Errorf("method not allowed"), resp, req, http.StatusMethodNotAllowed)
return
}
force, _ := strconv.ParseBool(req.FormValue("force"))
if err := caCertReplace(server, req.Body, force); err != nil {
if err := caCertReplace(control, req.Body, force); err != nil {
util.SendErrorWithID(err, "certificate", resp, req, http.StatusInternalServerError)
return
}
@ -48,54 +48,54 @@ func caCertReplaceHandler(server *config.Control) http.HandlerFunc {
// validated to confirm that the new certs share a common root with the existing certs, and if so are saved to
// the datastore. If the functions succeeds, servers should be restarted immediately to load the new certs
// from the bootstrap data.
func caCertReplace(server *config.Control, buf io.ReadCloser, force bool) error {
tmpdir, err := os.MkdirTemp(server.DataDir, ".rotate-ca-tmp-")
func caCertReplace(control *config.Control, buf io.ReadCloser, force bool) error {
tmpdir, err := os.MkdirTemp(control.DataDir, ".rotate-ca-tmp-")
if err != nil {
return err
}
defer os.RemoveAll(tmpdir)
runtime := config.NewRuntime(nil)
runtime.EtcdConfig = server.Runtime.EtcdConfig
runtime.ServerToken = server.Runtime.ServerToken
runtime.EtcdConfig = control.Runtime.EtcdConfig
runtime.ServerToken = control.Runtime.ServerToken
tmpServer := &config.Control{
tmpControl := &config.Control{
Runtime: runtime,
Token: server.Token,
Token: control.Token,
DataDir: tmpdir,
}
deps.CreateRuntimeCertFiles(tmpServer)
deps.CreateRuntimeCertFiles(tmpControl)
bootstrapData := bootstrap.PathsDataformat{}
if err := json.NewDecoder(buf).Decode(&bootstrapData); err != nil {
return err
}
if err := bootstrap.WriteToDiskFromStorage(bootstrapData, &tmpServer.Runtime.ControlRuntimeBootstrap); err != nil {
if err := bootstrap.WriteToDiskFromStorage(bootstrapData, &tmpControl.Runtime.ControlRuntimeBootstrap); err != nil {
return err
}
if err := defaultBootstrap(server, tmpServer); err != nil {
if err := defaultBootstrap(control, tmpControl); err != nil {
return errors.Wrap(err, "failed to set default bootstrap values")
}
if err := validateBootstrap(server, tmpServer); err != nil {
if err := validateBootstrap(control, tmpControl); err != nil {
if !force {
return errors.Wrap(err, "failed to validate new CA certificates and keys")
}
logrus.Warnf("Save of CA certificates and keys forced, ignoring validation errors: %v", err)
}
return cluster.Save(context.TODO(), tmpServer, true)
return cluster.Save(context.TODO(), tmpControl, true)
}
// defaultBootstrap provides default values from the existing bootstrap fields
// if the value is not tagged for rotation, or the current value is empty.
func defaultBootstrap(oldServer, newServer *config.Control) error {
func defaultBootstrap(oldControl, newControl *config.Control) error {
errs := []error{}
// Use reflection to iterate over all of the bootstrap fields, checking files at each of the new paths.
oldMeta := reflect.ValueOf(&oldServer.Runtime.ControlRuntimeBootstrap).Elem()
newMeta := reflect.ValueOf(&newServer.Runtime.ControlRuntimeBootstrap).Elem()
oldMeta := reflect.ValueOf(&oldControl.Runtime.ControlRuntimeBootstrap).Elem()
newMeta := reflect.ValueOf(&newControl.Runtime.ControlRuntimeBootstrap).Elem()
// use the existing file if the new file does not exist or is empty
for _, field := range reflect.VisibleFields(oldMeta.Type()) {
@ -122,12 +122,12 @@ func defaultBootstrap(oldServer, newServer *config.Control) error {
// validateBootstrap checks the new certs and keys to ensure that the cluster would function properly were they to be used.
// - The new leaf CA certificates must be verifiable using the same root and intermediate certs as the current leaf CA certificates.
// - The new service account signing key bundle must include the currently active signing key.
func validateBootstrap(oldServer, newServer *config.Control) error {
func validateBootstrap(oldControl, newControl *config.Control) error {
errs := []error{}
// Use reflection to iterate over all of the bootstrap fields, checking files at each of the new paths.
oldMeta := reflect.ValueOf(&oldServer.Runtime.ControlRuntimeBootstrap).Elem()
newMeta := reflect.ValueOf(&newServer.Runtime.ControlRuntimeBootstrap).Elem()
oldMeta := reflect.ValueOf(&oldControl.Runtime.ControlRuntimeBootstrap).Elem()
newMeta := reflect.ValueOf(&newControl.Runtime.ControlRuntimeBootstrap).Elem()
for _, field := range reflect.VisibleFields(oldMeta.Type()) {
// Only handle bootstrap fields tagged for rotation

View File

@ -0,0 +1,400 @@
package handlers
import (
	"context"
	"crypto"
	"crypto/x509"
	"fmt"
	"net"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/gorilla/mux"
	"github.com/k3s-io/k3s/pkg/bootstrap"
	"github.com/k3s-io/k3s/pkg/cli/cmds"
	"github.com/k3s-io/k3s/pkg/daemons/config"
	"github.com/k3s-io/k3s/pkg/etcd"
	"github.com/k3s-io/k3s/pkg/nodepassword"
	"github.com/k3s-io/k3s/pkg/util"
	"github.com/pkg/errors"
	certutil "github.com/rancher/dynamiclistener/cert"
	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apiserver/pkg/authentication/user"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
// CACerts returns a handler that serves the cluster's server CA certificate
// bundle as plain text. The bundle is lazily read from disk on the first
// successful request and cached thereafter; the mutex guards the lazy init,
// which was previously unsynchronized and raced under concurrent requests.
// (Param renamed config -> control for consistency with the rest of the package.)
func CACerts(control *config.Control) http.Handler {
	var mu sync.Mutex
	var ca []byte
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		mu.Lock()
		if ca == nil {
			// read failures are not cached, so a later request can retry
			bytes, err := os.ReadFile(control.Runtime.ServerCA)
			if err != nil {
				mu.Unlock()
				util.SendError(err, resp, req)
				return
			}
			ca = bytes
		}
		body := ca
		mu.Unlock()

		resp.Header().Set("content-type", "text/plain")
		resp.Write(body)
	})
}
func Ping() http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
data := []byte("pong")
resp.WriteHeader(http.StatusOK)
resp.Header().Set("Content-Type", "text/plain")
resp.Header().Set("Content-Length", strconv.Itoa(len(data)))
resp.Write(data)
})
}
// ServingKubeletCert returns a handler that signs and returns a serving
// certificate for the kubelet on the requesting node, after the node's
// credentials have been validated by the supplied NodeAuthValidator. The cert
// is valid for localhost, 127.0.0.1, the node name, and any addresses from
// the node IP header. The response body contains the signed cert chain
// followed by the private key.
func ServingKubeletCert(control *config.Control, auth nodepassword.NodeAuthValidator) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		nodeName, errCode, err := auth(req)
		if err != nil {
			util.SendError(err, resp, req, errCode)
			return
		}

		keyFile := control.Runtime.ServingKubeletKey
		caCerts, caKey, key, err := getCACertAndKeys(control.Runtime.ServerCA, control.Runtime.ServerCAKey, keyFile)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		ips := []net.IP{net.ParseIP("127.0.0.1")}
		program := mux.Vars(req)["program"]
		if nodeIP := req.Header.Get(program + "-Node-IP"); nodeIP != "" {
			for _, v := range strings.Split(nodeIP, ",") {
				ip := net.ParseIP(v)
				if ip == nil {
					// report the raw header value; formatting the nil net.IP
					// here (as before) always rendered as "<nil>".
					util.SendError(fmt.Errorf("invalid node IP address %s", v), resp, req)
					return
				}
				ips = append(ips, ip)
			}
		}

		cert, err := certutil.NewSignedCert(certutil.Config{
			CommonName: nodeName,
			Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
			AltNames: certutil.AltNames{
				DNSNames: []string{nodeName, "localhost"},
				IPs:      ips,
			},
		}, key, caCerts[0], caKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		keyBytes, err := os.ReadFile(keyFile)
		if err != nil {
			http.Error(resp, err.Error(), http.StatusInternalServerError)
			return
		}

		resp.Write(util.EncodeCertsPEM(cert, caCerts))
		resp.Write(keyBytes)
	})
}
// ClientKubeletCert returns a handler that signs and returns a client
// certificate for the kubelet on the requesting node, after the node's
// credentials have been validated by the supplied NodeAuthValidator. The
// response body contains the signed cert chain followed by the private key.
func ClientKubeletCert(control *config.Control, auth nodepassword.NodeAuthValidator) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		nodeName, errCode, err := auth(req)
		if err != nil {
			util.SendError(err, resp, req, errCode)
			return
		}

		runtime := control.Runtime
		caCerts, caKey, key, err := getCACertAndKeys(runtime.ClientCA, runtime.ClientCAKey, runtime.ClientKubeletKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		template := certutil.Config{
			CommonName:   "system:node:" + nodeName,
			Organization: []string{user.NodesGroup},
			Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		}
		cert, err := certutil.NewSignedCert(template, key, caCerts[0], caKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		keyBytes, err := os.ReadFile(runtime.ClientKubeletKey)
		if err != nil {
			http.Error(resp, err.Error(), http.StatusInternalServerError)
			return
		}

		resp.Write(util.EncodeCertsPEM(cert, caCerts))
		resp.Write(keyBytes)
	})
}
// ClientKubeProxyCert returns a handler that signs and returns a client
// certificate for kube-proxy. The response body contains the signed cert
// chain followed by the private key.
func ClientKubeProxyCert(control *config.Control) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		runtime := control.Runtime
		caCerts, caKey, key, err := getCACertAndKeys(runtime.ClientCA, runtime.ClientCAKey, runtime.ClientKubeProxyKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		template := certutil.Config{
			CommonName: user.KubeProxy,
			Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		}
		cert, err := certutil.NewSignedCert(template, key, caCerts[0], caKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		keyBytes, err := os.ReadFile(runtime.ClientKubeProxyKey)
		if err != nil {
			http.Error(resp, err.Error(), http.StatusInternalServerError)
			return
		}

		resp.Write(util.EncodeCertsPEM(cert, caCerts))
		resp.Write(keyBytes)
	})
}
// ClientControllerCert returns a handler that signs and returns a client
// certificate for the program's controller user. The response body contains
// the signed cert chain followed by the private key.
func ClientControllerCert(control *config.Control) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		runtime := control.Runtime
		caCerts, caKey, key, err := getCACertAndKeys(runtime.ClientCA, runtime.ClientCAKey, runtime.ClientK3sControllerKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		// This user (system:k3s-controller by default) must be bound to a role in rolebindings.yaml or the downstream equivalent
		program := mux.Vars(req)["program"]
		template := certutil.Config{
			CommonName: "system:" + program + "-controller",
			Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		}
		cert, err := certutil.NewSignedCert(template, key, caCerts[0], caKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		keyBytes, err := os.ReadFile(runtime.ClientK3sControllerKey)
		if err != nil {
			http.Error(resp, err.Error(), http.StatusInternalServerError)
			return
		}

		resp.Write(util.EncodeCertsPEM(cert, caCerts))
		resp.Write(keyBytes)
	})
}
// File returns a handler that serves the content of one or more files as
// plain text. A single file is served via http.ServeFile; multiple files are
// concatenated into the response body in the order given.
func File(fileName ...string) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		resp.Header().Set("Content-Type", "text/plain")

		if len(fileName) == 1 {
			http.ServeFile(resp, req, fileName[0])
			return
		}

		for _, name := range fileName {
			content, err := os.ReadFile(name)
			if err != nil {
				util.SendError(errors.Wrapf(err, "failed to read %s", name), resp, req, http.StatusInternalServerError)
				return
			}
			resp.Write(content)
		}
	})
}
// APIServer returns a handler that proxies requests to the apiserver. If the
// apiserver is disabled by configuration a 503 is always returned; if it is
// enabled but not yet running, a 503 is returned until it comes up.
func APIServer(control *config.Control, cfg *cmds.Server) http.Handler {
	if cfg.DisableAPIServer {
		return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
			util.SendError(util.ErrAPIDisabled, resp, req, http.StatusServiceUnavailable)
		})
	}
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		runtime := control.Runtime
		if runtime == nil || runtime.APIServer == nil {
			util.SendError(util.ErrAPINotReady, resp, req, http.StatusServiceUnavailable)
			return
		}
		runtime.APIServer.ServeHTTP(resp, req)
	})
}
// APIServers returns a handler that responds with a JSON list of known
// apiserver addresses, collected from both kubernetes endpoints and etcd
// within a five-second timeout.
func APIServers(control *config.Control) http.Handler {
	collectAddresses := getAddressCollector(control)
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		ctx, cancel := context.WithTimeout(req.Context(), time.Second*5)
		defer cancel()

		resp.Header().Set("content-type", "application/json")
		if err := json.NewEncoder(resp).Encode(collectAddresses(ctx)); err != nil {
			util.SendError(errors.Wrap(err, "failed to encode apiserver endpoints"), resp, req, http.StatusInternalServerError)
		}
	})
}
// Config returns a handler that serves the server's config.Control struct to
// agents as JSON.
func Config(control *config.Control, cfg *cmds.Server) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		// Startup hooks may read and modify cmds.Server in a goroutine, but as these are copied into
		// config.Control before the startup hooks are called, any modifications need to be sync'd back
		// into the struct before it is sent to agents.
		// At this time we don't sync all the fields, just those known to be touched by startup hooks.
		control.DisableKubeProxy = cfg.DisableKubeProxy

		resp.Header().Set("content-type", "application/json")
		enc := json.NewEncoder(resp)
		if err := enc.Encode(control); err != nil {
			util.SendError(errors.Wrap(err, "failed to encode agent config"), resp, req, http.StatusInternalServerError)
		}
	})
}
// Readyz returns a handler that reports server readiness: 200 "ok" once the
// core controllers have been started, 503 before then.
func Readyz(control *config.Control) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		if control.Runtime.Core == nil {
			util.SendError(util.ErrCoreNotReady, resp, req, http.StatusServiceUnavailable)
			return
		}
		data := []byte("ok")
		// Headers must be set before WriteHeader is called; the previous
		// ordering set them afterwards, which silently discarded them.
		resp.Header().Set("Content-Type", "text/plain")
		resp.Header().Set("Content-Length", strconv.Itoa(len(data)))
		resp.WriteHeader(http.StatusOK)
		resp.Write(data)
	})
}
// Bootstrap returns a handler that serves the cluster bootstrap data when
// HTTP bootstrapping (embedded etcd) is enabled, and an error response
// otherwise.
func Bootstrap(control *config.Control) http.Handler {
	if !control.Runtime.HTTPBootstrap {
		return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
			logrus.Warnf("Received HTTP bootstrap request from %s, but embedded etcd is not enabled.", req.RemoteAddr)
			util.SendError(errors.New("etcd disabled"), resp, req, http.StatusBadRequest)
		})
	}
	return bootstrap.Handler(&control.Runtime.ControlRuntimeBootstrap)
}
func Static(urlPrefix, staticDir string) http.Handler {
return http.StripPrefix(urlPrefix, http.FileServer(http.Dir(staticDir)))
}
// getCACertAndKeys loads and parses PEM material from disk, returning the CA
// certificate chain, the CA signing key, and the leaf signing key.
func getCACertAndKeys(caCertFile, caKeyFile, signingKeyFile string) ([]*x509.Certificate, crypto.Signer, crypto.Signer, error) {
	signingKeyPEM, err := os.ReadFile(signingKeyFile)
	if err != nil {
		return nil, nil, nil, err
	}
	signingKey, err := certutil.ParsePrivateKeyPEM(signingKeyPEM)
	if err != nil {
		return nil, nil, nil, err
	}

	caKeyPEM, err := os.ReadFile(caKeyFile)
	if err != nil {
		return nil, nil, nil, err
	}
	caSigningKey, err := certutil.ParsePrivateKeyPEM(caKeyPEM)
	if err != nil {
		return nil, nil, nil, err
	}

	caCertPEM, err := os.ReadFile(caCertFile)
	if err != nil {
		return nil, nil, nil, err
	}
	caCerts, err := certutil.ParseCertsPEM(caCertPEM)
	if err != nil {
		return nil, nil, nil, err
	}

	return caCerts, caSigningKey.(crypto.Signer), signingKey.(crypto.Signer), nil
}
// addressGetter is a common signature for functions that return a channel on
// which a single batch of apiserver addresses may be delivered before the
// channel is closed.
type addressGetter func(ctx context.Context) <-chan []string

// kubernetesGetter returns an addressGetter that reads apiserver addresses
// from the kubernetes service endpoint in the default namespace. The
// endpoints client is created lazily, once the clientset is available.
func kubernetesGetter(control *config.Control) addressGetter {
	var endpointsClient typedcorev1.EndpointsInterface
	return func(ctx context.Context) <-chan []string {
		ch := make(chan []string, 1)
		go func() {
			defer close(ch)
			if endpointsClient == nil && control.Runtime.K8s != nil {
				// initialize the client if we can
				endpointsClient = control.Runtime.K8s.CoreV1().Endpoints(metav1.NamespaceDefault)
			}
			if endpointsClient == nil {
				return
			}
			endpoint, err := endpointsClient.Get(ctx, "kubernetes", metav1.GetOptions{})
			if err != nil {
				logrus.Debugf("Failed to get apiserver addresses from kubernetes: %v", err)
				return
			}
			ch <- util.GetAddresses(endpoint)
		}()
		return ch
	}
}
// etcdGetter returns an addressGetter that reads apiserver addresses from the
// list persisted in etcd.
func etcdGetter(control *config.Control) addressGetter {
	return func(ctx context.Context) <-chan []string {
		ch := make(chan []string, 1)
		go func() {
			defer close(ch)
			addresses, err := etcd.GetAPIServerURLsFromETCD(ctx, control)
			if err != nil {
				logrus.Debugf("Failed to get apiserver addresses from etcd: %v", err)
				return
			}
			ch <- addresses
		}()
		return ch
	}
}
// getAddressCollector returns a function that can be called to return
// apiserver addresses from both kubernetes and etcd.
func getAddressCollector(control *config.Control) func(ctx context.Context) []string {
	getFromKubernetes := kubernetesGetter(control)
	getFromEtcd := etcdGetter(control)

	// read from both kubernetes and etcd in parallel, returning the collected results
	return func(ctx context.Context) []string {
		// set deduplicates addresses reported by both sources
		a := sets.Set[string]{}
		k8sCh := getFromKubernetes(ctx)
		etcdCh := getFromEtcd(ctx)

		// Receive until both channels are closed. A closed channel is set to
		// nil so that its select case blocks forever; the previous ok-flag
		// loop kept selecting on the already-closed (always-ready) channel,
		// busy-spinning until the other source finished.
		for k8sCh != nil || etcdCh != nil {
			select {
			case r, ok := <-k8sCh:
				if !ok {
					k8sCh = nil
					continue
				}
				a.Insert(r...)
			case r, ok := <-etcdCh:
				if !ok {
					etcdCh = nil
					continue
				}
				a.Insert(r...)
			case <-ctx.Done():
				// timed out or cancelled; return whatever was collected so far
				return a.UnsortedList()
			}
		}
		return a.UnsortedList()
	}
}

View File

@ -0,0 +1,66 @@
package handlers
import (
"context"
"net/http"
"path/filepath"
"github.com/gorilla/mux"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/nodepassword"
"github.com/k3s-io/k3s/pkg/server/auth"
"github.com/k3s-io/k3s/pkg/version"
"k8s.io/apiserver/pkg/authentication/user"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
)
const (
	// staticURL is the URL prefix under which on-disk static content is served.
	staticURL = "/static/"
)

// NewHandler returns the top-level request router for the server. Routes are
// arranged as a chain of routers with different auth middleware; each router's
// NotFoundHandler falls through to the next router in the chain, so a request
// is handled by the first router whose route matches, subject to that
// router's role requirements.
func NewHandler(ctx context.Context, control *config.Control, cfg *cmds.Server) http.Handler {
	nodeAuth := nodepassword.GetNodeAuthValidator(ctx, control)

	prefix := "/v1-{program}"

	// routes available to agent, node, or bootstrap credentials; unmatched
	// requests fall through to the apiserver proxy.
	authed := mux.NewRouter().SkipClean(true)
	authed.NotFoundHandler = APIServer(control, cfg)
	authed.Use(auth.HasRole(control, version.Program+":agent", user.NodesGroup, bootstrapapi.BootstrapDefaultGroup))
	authed.Handle(prefix+"/serving-kubelet.crt", ServingKubeletCert(control, nodeAuth))
	authed.Handle(prefix+"/client-kubelet.crt", ClientKubeletCert(control, nodeAuth))
	authed.Handle(prefix+"/client-kube-proxy.crt", ClientKubeProxyCert(control))
	authed.Handle(prefix+"/client-{program}-controller.crt", ClientControllerCert(control))
	authed.Handle(prefix+"/client-ca.crt", File(control.Runtime.ClientCA))
	authed.Handle(prefix+"/server-ca.crt", File(control.Runtime.ServerCA))
	authed.Handle(prefix+"/apiservers", APIServers(control))
	authed.Handle(prefix+"/config", Config(control, cfg))
	authed.Handle(prefix+"/readyz", Readyz(control))

	// routes requiring node credentials
	nodeAuthed := mux.NewRouter().SkipClean(true)
	nodeAuthed.NotFoundHandler = authed
	nodeAuthed.Use(auth.HasRole(control, user.NodesGroup))
	nodeAuthed.Handle(prefix+"/connect", control.Runtime.Tunnel)

	// routes requiring server credentials
	serverAuthed := mux.NewRouter().SkipClean(true)
	serverAuthed.NotFoundHandler = nodeAuthed
	serverAuthed.Use(auth.HasRole(control, version.Program+":server"))
	serverAuthed.Handle(prefix+"/encrypt/status", EncryptionStatus(control))
	serverAuthed.Handle(prefix+"/encrypt/config", EncryptionConfig(ctx, control))
	serverAuthed.Handle(prefix+"/cert/cacerts", CACertReplace(control))
	serverAuthed.Handle(prefix+"/server-bootstrap", Bootstrap(control))
	serverAuthed.Handle(prefix+"/token", TokenRequest(ctx, control))

	// routes requiring system-privileged credentials; CONNECT requests are
	// routed to the tunnel server.
	systemAuthed := mux.NewRouter().SkipClean(true)
	systemAuthed.NotFoundHandler = serverAuthed
	systemAuthed.MethodNotAllowedHandler = serverAuthed
	systemAuthed.Use(auth.HasRole(control, user.SystemPrivilegedGroup))
	systemAuthed.Methods(http.MethodConnect).Handler(control.Runtime.Tunnel)

	// unauthenticated routes
	router := mux.NewRouter().SkipClean(true)
	router.NotFoundHandler = systemAuthed
	router.PathPrefix(staticURL).Handler(Static(staticURL, filepath.Join(control.DataDir, "static")))
	router.Handle("/cacerts", CACerts(control))
	router.Handle("/ping", Ping())

	return router
}

View File

@ -1,4 +1,4 @@
package server
package handlers
import (
"context"
@ -60,9 +60,9 @@ func getEncryptionRequest(req *http.Request) (*EncryptionRequest, error) {
return result, err
}
func encryptionStatusHandler(server *config.Control) http.Handler {
func EncryptionStatus(control *config.Control) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
status, err := encryptionStatus(server)
status, err := encryptionStatus(control)
if err != nil {
util.SendErrorWithID(err, "secret-encrypt", resp, req, http.StatusInternalServerError)
return
@ -77,9 +77,9 @@ func encryptionStatusHandler(server *config.Control) http.Handler {
})
}
func encryptionStatus(server *config.Control) (EncryptionState, error) {
func encryptionStatus(control *config.Control) (EncryptionState, error) {
state := EncryptionState{}
providers, err := secretsencrypt.GetEncryptionProviders(server.Runtime)
providers, err := secretsencrypt.GetEncryptionProviders(control.Runtime)
if os.IsNotExist(err) {
return state, nil
} else if err != nil {
@ -87,17 +87,17 @@ func encryptionStatus(server *config.Control) (EncryptionState, error) {
}
if providers[1].Identity != nil && providers[0].AESCBC != nil {
state.Enable = ptr.To(true)
} else if providers[0].Identity != nil && providers[1].AESCBC != nil || !server.EncryptSecrets {
} else if providers[0].Identity != nil && providers[1].AESCBC != nil || !control.EncryptSecrets {
state.Enable = ptr.To(false)
}
if err := verifyEncryptionHashAnnotation(server.Runtime, server.Runtime.Core.Core(), ""); err != nil {
if err := verifyEncryptionHashAnnotation(control.Runtime, control.Runtime.Core.Core(), ""); err != nil {
state.HashMatch = false
state.HashError = err.Error()
} else {
state.HashMatch = true
}
stage, _, err := getEncryptionHashAnnotation(server.Runtime.Core.Core())
stage, _, err := getEncryptionHashAnnotation(control.Runtime.Core.Core())
if err != nil {
return state, err
}
@ -122,21 +122,21 @@ func encryptionStatus(server *config.Control) (EncryptionState, error) {
return state, nil
}
func encryptionEnable(ctx context.Context, server *config.Control, enable bool) error {
providers, err := secretsencrypt.GetEncryptionProviders(server.Runtime)
func encryptionEnable(ctx context.Context, control *config.Control, enable bool) error {
providers, err := secretsencrypt.GetEncryptionProviders(control.Runtime)
if err != nil {
return err
}
if len(providers) > 2 {
return fmt.Errorf("more than 2 providers (%d) found in secrets encryption", len(providers))
}
curKeys, err := secretsencrypt.GetEncryptionKeys(server.Runtime, false)
curKeys, err := secretsencrypt.GetEncryptionKeys(control.Runtime, false)
if err != nil {
return err
}
if providers[1].Identity != nil && providers[0].AESCBC != nil && !enable {
logrus.Infoln("Disabling secrets encryption")
if err := secretsencrypt.WriteEncryptionConfig(server.Runtime, curKeys, enable); err != nil {
if err := secretsencrypt.WriteEncryptionConfig(control.Runtime, curKeys, enable); err != nil {
return err
}
} else if !enable {
@ -144,7 +144,7 @@ func encryptionEnable(ctx context.Context, server *config.Control, enable bool)
return nil
} else if providers[0].Identity != nil && providers[1].AESCBC != nil && enable {
logrus.Infoln("Enabling secrets encryption")
if err := secretsencrypt.WriteEncryptionConfig(server.Runtime, curKeys, enable); err != nil {
if err := secretsencrypt.WriteEncryptionConfig(control.Runtime, curKeys, enable); err != nil {
return err
}
} else if enable {
@ -153,13 +153,13 @@ func encryptionEnable(ctx context.Context, server *config.Control, enable bool)
} else {
return fmt.Errorf("unable to enable/disable secrets encryption, unknown configuration")
}
if err := cluster.Save(ctx, server, true); err != nil {
if err := cluster.Save(ctx, control, true); err != nil {
return err
}
return reencryptAndRemoveKey(ctx, server, true, os.Getenv("NODE_NAME"))
return reencryptAndRemoveKey(ctx, control, true, os.Getenv("NODE_NAME"))
}
func encryptionConfigHandler(ctx context.Context, server *config.Control) http.Handler {
func EncryptionConfig(ctx context.Context, control *config.Control) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodPut {
util.SendError(fmt.Errorf("method not allowed"), resp, req, http.StatusMethodNotAllowed)
@ -173,18 +173,18 @@ func encryptionConfigHandler(ctx context.Context, server *config.Control) http.H
if encryptReq.Stage != nil {
switch *encryptReq.Stage {
case secretsencrypt.EncryptionPrepare:
err = encryptionPrepare(ctx, server, encryptReq.Force)
err = encryptionPrepare(ctx, control, encryptReq.Force)
case secretsencrypt.EncryptionRotate:
err = encryptionRotate(ctx, server, encryptReq.Force)
err = encryptionRotate(ctx, control, encryptReq.Force)
case secretsencrypt.EncryptionRotateKeys:
err = encryptionRotateKeys(ctx, server)
err = encryptionRotateKeys(ctx, control)
case secretsencrypt.EncryptionReencryptActive:
err = encryptionReencrypt(ctx, server, encryptReq.Force, encryptReq.Skip)
err = encryptionReencrypt(ctx, control, encryptReq.Force, encryptReq.Skip)
default:
err = fmt.Errorf("unknown stage %s requested", *encryptReq.Stage)
}
} else if encryptReq.Enable != nil {
err = encryptionEnable(ctx, server, *encryptReq.Enable)
err = encryptionEnable(ctx, control, *encryptReq.Enable)
}
if err != nil {
@ -199,13 +199,13 @@ func encryptionConfigHandler(ctx context.Context, server *config.Control) http.H
})
}
func encryptionPrepare(ctx context.Context, server *config.Control, force bool) error {
func encryptionPrepare(ctx context.Context, control *config.Control, force bool) error {
states := secretsencrypt.EncryptionStart + "-" + secretsencrypt.EncryptionReencryptFinished
if err := verifyEncryptionHashAnnotation(server.Runtime, server.Runtime.Core.Core(), states); err != nil && !force {
if err := verifyEncryptionHashAnnotation(control.Runtime, control.Runtime.Core.Core(), states); err != nil && !force {
return err
}
curKeys, err := secretsencrypt.GetEncryptionKeys(server.Runtime, false)
curKeys, err := secretsencrypt.GetEncryptionKeys(control.Runtime, false)
if err != nil {
return err
}
@ -215,29 +215,29 @@ func encryptionPrepare(ctx context.Context, server *config.Control, force bool)
}
logrus.Infoln("Adding secrets-encryption key: ", curKeys[len(curKeys)-1])
if err := secretsencrypt.WriteEncryptionConfig(server.Runtime, curKeys, true); err != nil {
if err := secretsencrypt.WriteEncryptionConfig(control.Runtime, curKeys, true); err != nil {
return err
}
nodeName := os.Getenv("NODE_NAME")
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
node, err := server.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
node, err := control.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return secretsencrypt.WriteEncryptionHashAnnotation(server.Runtime, node, false, secretsencrypt.EncryptionPrepare)
return secretsencrypt.WriteEncryptionHashAnnotation(control.Runtime, node, false, secretsencrypt.EncryptionPrepare)
})
if err != nil {
return err
}
return cluster.Save(ctx, server, true)
return cluster.Save(ctx, control, true)
}
func encryptionRotate(ctx context.Context, server *config.Control, force bool) error {
if err := verifyEncryptionHashAnnotation(server.Runtime, server.Runtime.Core.Core(), secretsencrypt.EncryptionPrepare); err != nil && !force {
func encryptionRotate(ctx context.Context, control *config.Control, force bool) error {
if err := verifyEncryptionHashAnnotation(control.Runtime, control.Runtime.Core.Core(), secretsencrypt.EncryptionPrepare); err != nil && !force {
return err
}
curKeys, err := secretsencrypt.GetEncryptionKeys(server.Runtime, false)
curKeys, err := secretsencrypt.GetEncryptionKeys(control.Runtime, false)
if err != nil {
return err
}
@ -245,49 +245,49 @@ func encryptionRotate(ctx context.Context, server *config.Control, force bool) e
// Right rotate elements
rotatedKeys := append(curKeys[len(curKeys)-1:], curKeys[:len(curKeys)-1]...)
if err = secretsencrypt.WriteEncryptionConfig(server.Runtime, rotatedKeys, true); err != nil {
if err = secretsencrypt.WriteEncryptionConfig(control.Runtime, rotatedKeys, true); err != nil {
return err
}
logrus.Infoln("Encryption keys right rotated")
nodeName := os.Getenv("NODE_NAME")
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
node, err := server.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
node, err := control.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return secretsencrypt.WriteEncryptionHashAnnotation(server.Runtime, node, false, secretsencrypt.EncryptionRotate)
return secretsencrypt.WriteEncryptionHashAnnotation(control.Runtime, node, false, secretsencrypt.EncryptionRotate)
})
if err != nil {
return err
}
return cluster.Save(ctx, server, true)
return cluster.Save(ctx, control, true)
}
func encryptionReencrypt(ctx context.Context, server *config.Control, force bool, skip bool) error {
if err := verifyEncryptionHashAnnotation(server.Runtime, server.Runtime.Core.Core(), secretsencrypt.EncryptionRotate); err != nil && !force {
func encryptionReencrypt(ctx context.Context, control *config.Control, force bool, skip bool) error {
if err := verifyEncryptionHashAnnotation(control.Runtime, control.Runtime.Core.Core(), secretsencrypt.EncryptionRotate); err != nil && !force {
return err
}
// Set the reencrypt-active annotation so other nodes know we are in the process of reencrypting.
// As this stage is not persisted, we do not write the annotation to file
nodeName := os.Getenv("NODE_NAME")
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
node, err := server.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
node, err := control.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return secretsencrypt.WriteEncryptionHashAnnotation(server.Runtime, node, true, secretsencrypt.EncryptionReencryptActive)
return secretsencrypt.WriteEncryptionHashAnnotation(control.Runtime, node, true, secretsencrypt.EncryptionReencryptActive)
}); err != nil {
return err
}
// We use a timeout of 10s for the reencrypt call, so finish the process as a go routine and return immediately.
// No errors are returned to the user via CLI, any errors will be logged on the server
go reencryptAndRemoveKey(ctx, server, skip, nodeName)
go reencryptAndRemoveKey(ctx, control, skip, nodeName)
return nil
}
func addAndRotateKeys(server *config.Control) error {
curKeys, err := secretsencrypt.GetEncryptionKeys(server.Runtime, false)
func addAndRotateKeys(control *config.Control) error {
curKeys, err := secretsencrypt.GetEncryptionKeys(control.Runtime, false)
if err != nil {
return err
}
@ -297,29 +297,29 @@ func addAndRotateKeys(server *config.Control) error {
}
logrus.Infoln("Adding secrets-encryption key: ", curKeys[len(curKeys)-1])
if err := secretsencrypt.WriteEncryptionConfig(server.Runtime, curKeys, true); err != nil {
if err := secretsencrypt.WriteEncryptionConfig(control.Runtime, curKeys, true); err != nil {
return err
}
// Right rotate elements
rotatedKeys := append(curKeys[len(curKeys)-1:], curKeys[:len(curKeys)-1]...)
logrus.Infoln("Rotating secrets-encryption keys")
return secretsencrypt.WriteEncryptionConfig(server.Runtime, rotatedKeys, true)
return secretsencrypt.WriteEncryptionConfig(control.Runtime, rotatedKeys, true)
}
// encryptionRotateKeys both adds and rotates keys, and sets the annotation that triggers the
// reencryption process. It is the preferred way to rotate keys, starting with v1.28
func encryptionRotateKeys(ctx context.Context, server *config.Control) error {
func encryptionRotateKeys(ctx context.Context, control *config.Control) error {
states := secretsencrypt.EncryptionStart + "-" + secretsencrypt.EncryptionReencryptFinished
if err := verifyEncryptionHashAnnotation(server.Runtime, server.Runtime.Core.Core(), states); err != nil {
if err := verifyEncryptionHashAnnotation(control.Runtime, control.Runtime.Core.Core(), states); err != nil {
return err
}
if err := verifyRotateKeysSupport(server.Runtime.Core.Core()); err != nil {
if err := verifyRotateKeysSupport(control.Runtime.Core.Core()); err != nil {
return err
}
reloadTime, reloadSuccesses, err := secretsencrypt.GetEncryptionConfigMetrics(server.Runtime, true)
reloadTime, reloadSuccesses, err := secretsencrypt.GetEncryptionConfigMetrics(control.Runtime, true)
if err != nil {
return err
}
@ -328,72 +328,72 @@ func encryptionRotateKeys(ctx context.Context, server *config.Control) error {
// As this stage is not persisted, we do not write the annotation to file
nodeName := os.Getenv("NODE_NAME")
if err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
node, err := server.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
node, err := control.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return secretsencrypt.WriteEncryptionHashAnnotation(server.Runtime, node, true, secretsencrypt.EncryptionReencryptActive)
return secretsencrypt.WriteEncryptionHashAnnotation(control.Runtime, node, true, secretsencrypt.EncryptionReencryptActive)
}); err != nil {
return err
}
if err := addAndRotateKeys(server); err != nil {
if err := addAndRotateKeys(control); err != nil {
return err
}
if err := secretsencrypt.WaitForEncryptionConfigReload(server.Runtime, reloadSuccesses, reloadTime); err != nil {
if err := secretsencrypt.WaitForEncryptionConfigReload(control.Runtime, reloadSuccesses, reloadTime); err != nil {
return err
}
return reencryptAndRemoveKey(ctx, server, false, nodeName)
return reencryptAndRemoveKey(ctx, control, false, nodeName)
}
func reencryptAndRemoveKey(ctx context.Context, server *config.Control, skip bool, nodeName string) error {
if err := updateSecrets(ctx, server, nodeName); err != nil {
func reencryptAndRemoveKey(ctx context.Context, control *config.Control, skip bool, nodeName string) error {
if err := updateSecrets(ctx, control, nodeName); err != nil {
return err
}
// If skipping, revert back to the previous stage and do not remove the key
if skip {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
node, err := server.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
node, err := control.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
secretsencrypt.BootstrapEncryptionHashAnnotation(node, server.Runtime)
_, err = server.Runtime.Core.Core().V1().Node().Update(node)
secretsencrypt.BootstrapEncryptionHashAnnotation(node, control.Runtime)
_, err = control.Runtime.Core.Core().V1().Node().Update(node)
return err
})
return err
}
// Remove last key
curKeys, err := secretsencrypt.GetEncryptionKeys(server.Runtime, false)
curKeys, err := secretsencrypt.GetEncryptionKeys(control.Runtime, false)
if err != nil {
return err
}
logrus.Infoln("Removing key: ", curKeys[len(curKeys)-1])
curKeys = curKeys[:len(curKeys)-1]
if err = secretsencrypt.WriteEncryptionConfig(server.Runtime, curKeys, true); err != nil {
if err = secretsencrypt.WriteEncryptionConfig(control.Runtime, curKeys, true); err != nil {
return err
}
if err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
node, err := server.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
node, err := control.Runtime.Core.Core().V1().Node().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return secretsencrypt.WriteEncryptionHashAnnotation(server.Runtime, node, false, secretsencrypt.EncryptionReencryptFinished)
return secretsencrypt.WriteEncryptionHashAnnotation(control.Runtime, node, false, secretsencrypt.EncryptionReencryptFinished)
}); err != nil {
return err
}
return cluster.Save(ctx, server, true)
return cluster.Save(ctx, control, true)
}
func updateSecrets(ctx context.Context, server *config.Control, nodeName string) error {
k8s := server.Runtime.K8s
func updateSecrets(ctx context.Context, control *config.Control, nodeName string) error {
k8s := control.Runtime.K8s
nodeRef := &corev1.ObjectReference{
Kind: "Node",
Name: nodeName,

View File

@ -1,4 +1,4 @@
package server
package handlers
import (
"context"
@ -6,8 +6,10 @@ import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"github.com/k3s-io/k3s/pkg/clientaccess"
"github.com/k3s-io/k3s/pkg/cluster"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/passwd"
@ -30,7 +32,7 @@ func getServerTokenRequest(req *http.Request) (TokenRotateRequest, error) {
return result, err
}
func tokenRequestHandler(ctx context.Context, server *config.Control) http.Handler {
func TokenRequest(ctx context.Context, control *config.Control) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodPut {
util.SendError(fmt.Errorf("method not allowed"), resp, req, http.StatusMethodNotAllowed)
@ -43,7 +45,7 @@ func tokenRequestHandler(ctx context.Context, server *config.Control) http.Handl
util.SendError(err, resp, req, http.StatusBadRequest)
return
}
if err = tokenRotate(ctx, server, *sTokenReq.NewToken); err != nil {
if err = tokenRotate(ctx, control, *sTokenReq.NewToken); err != nil {
util.SendErrorWithID(err, "token", resp, req, http.StatusInternalServerError)
return
}
@ -51,8 +53,20 @@ func tokenRequestHandler(ctx context.Context, server *config.Control) http.Handl
})
}
func tokenRotate(ctx context.Context, server *config.Control, newToken string) error {
passwd, err := passwd.Read(server.Runtime.PasswdFile)
func WriteToken(token, file, certs string) error {
if len(token) == 0 {
return nil
}
token, err := clientaccess.FormatToken(token, certs)
if err != nil {
return err
}
return os.WriteFile(file, []byte(token+"\n"), 0600)
}
func tokenRotate(ctx context.Context, control *config.Control, newToken string) error {
passwd, err := passwd.Read(control.Runtime.PasswdFile)
if err != nil {
return err
}
@ -76,24 +90,24 @@ func tokenRotate(ctx context.Context, server *config.Control, newToken string) e
}
// If the agent token is the same as the server token, we need to change both
if agentToken, found := passwd.Pass("node"); found && agentToken == oldToken && server.AgentToken == "" {
if agentToken, found := passwd.Pass("node"); found && agentToken == oldToken && control.AgentToken == "" {
if err := passwd.EnsureUser("node", version.Program+":agent", newToken); err != nil {
return err
}
}
if err := passwd.Write(server.Runtime.PasswdFile); err != nil {
if err := passwd.Write(control.Runtime.PasswdFile); err != nil {
return err
}
serverTokenFile := filepath.Join(server.DataDir, "token")
if err := writeToken("server:"+newToken, serverTokenFile, server.Runtime.ServerCA); err != nil {
serverTokenFile := filepath.Join(control.DataDir, "token")
if err := WriteToken("server:"+newToken, serverTokenFile, control.Runtime.ServerCA); err != nil {
return err
}
if err := cluster.RotateBootstrapToken(ctx, server, oldToken); err != nil {
if err := cluster.RotateBootstrapToken(ctx, control, oldToken); err != nil {
return err
}
server.Token = newToken
return cluster.Save(ctx, server, true)
control.Token = newToken
return cluster.Save(ctx, control, true)
}

View File

@ -1,595 +0,0 @@
package server
import (
"context"
"crypto"
"crypto/x509"
"fmt"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/gorilla/mux"
"github.com/k3s-io/k3s/pkg/bootstrap"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/etcd"
"github.com/k3s-io/k3s/pkg/nodepassword"
"github.com/k3s-io/k3s/pkg/server/auth"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/version"
"github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
coreclient "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/endpoints/request"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
)
const (
staticURL = "/static/"
)
var (
identifier = nodeidentifier.NewDefaultNodeIdentifier()
)
// router builds the server's HTTP route table as a chain of mux routers with
// progressively stricter authentication. Each router's NotFoundHandler falls
// through to the next, so a request is handled by the least-privileged router
// whose auth and path both match:
//
//	router (anonymous) -> systemAuthed -> serverAuthed -> nodeAuthed -> authed
func router(ctx context.Context, config *Config, cfg *cmds.Server) http.Handler {
	serverConfig := &config.ControlConfig
	// nodeAuth validates node-password bootstrap credentials for cert requests.
	nodeAuth := passwordBootstrap(ctx, config)
	prefix := "/v1-" + version.Program

	// Routes reachable with agent, node, or bootstrap-token credentials:
	// certificate issuance, CA bundles, and agent configuration.
	authed := mux.NewRouter().SkipClean(true)
	authed.Use(auth.HasRole(serverConfig, version.Program+":agent", user.NodesGroup, bootstrapapi.BootstrapDefaultGroup))
	authed.Path(prefix + "/serving-kubelet.crt").Handler(servingKubeletCert(serverConfig, serverConfig.Runtime.ServingKubeletKey, nodeAuth))
	authed.Path(prefix + "/client-kubelet.crt").Handler(clientKubeletCert(serverConfig, serverConfig.Runtime.ClientKubeletKey, nodeAuth))
	authed.Path(prefix + "/client-kube-proxy.crt").Handler(fileHandler(serverConfig.Runtime.ClientKubeProxyCert, serverConfig.Runtime.ClientKubeProxyKey))
	authed.Path(prefix + "/client-" + version.Program + "-controller.crt").Handler(fileHandler(serverConfig.Runtime.ClientK3sControllerCert, serverConfig.Runtime.ClientK3sControllerKey))
	authed.Path(prefix + "/client-ca.crt").Handler(fileHandler(serverConfig.Runtime.ClientCA))
	authed.Path(prefix + "/server-ca.crt").Handler(fileHandler(serverConfig.Runtime.ServerCA))
	authed.Path(prefix + "/apiservers").Handler(apiserversHandler(serverConfig))
	authed.Path(prefix + "/config").Handler(configHandler(serverConfig, cfg))
	authed.Path(prefix + "/readyz").Handler(readyzHandler(serverConfig))

	// Unmatched authed requests are proxied to the apiserver, or rejected
	// with 503 when this node runs without a local apiserver.
	if cfg.DisableAPIServer {
		authed.NotFoundHandler = apiserverDisabled()
	} else {
		authed.NotFoundHandler = apiserver(serverConfig.Runtime)
	}

	// Routes requiring node credentials: the agent tunnel endpoint.
	nodeAuthed := mux.NewRouter().SkipClean(true)
	nodeAuthed.NotFoundHandler = authed
	nodeAuthed.Use(auth.HasRole(serverConfig, user.NodesGroup))
	nodeAuthed.Path(prefix + "/connect").Handler(serverConfig.Runtime.Tunnel)

	// Routes requiring server credentials: secrets-encryption management,
	// CA replacement, bootstrap data, and token rotation.
	serverAuthed := mux.NewRouter().SkipClean(true)
	serverAuthed.NotFoundHandler = nodeAuthed
	serverAuthed.Use(auth.HasRole(serverConfig, version.Program+":server"))
	serverAuthed.Path(prefix + "/encrypt/status").Handler(encryptionStatusHandler(serverConfig))
	serverAuthed.Path(prefix + "/encrypt/config").Handler(encryptionConfigHandler(ctx, serverConfig))
	serverAuthed.Path(prefix + "/cert/cacerts").Handler(caCertReplaceHandler(serverConfig))
	serverAuthed.Path(prefix + "/server-bootstrap").Handler(bootstrapHandler(serverConfig.Runtime))
	serverAuthed.Path(prefix + "/token").Handler(tokenRequestHandler(ctx, serverConfig))

	// Routes requiring system:masters-equivalent credentials: CONNECT-method
	// tunnel access for any path.
	systemAuthed := mux.NewRouter().SkipClean(true)
	systemAuthed.NotFoundHandler = serverAuthed
	systemAuthed.MethodNotAllowedHandler = serverAuthed
	systemAuthed.Use(auth.HasRole(serverConfig, user.SystemPrivilegedGroup))
	systemAuthed.Methods(http.MethodConnect).Handler(serverConfig.Runtime.Tunnel)

	// Anonymous routes: static assets, the server CA bundle, and ping.
	staticDir := filepath.Join(serverConfig.DataDir, "static")
	router := mux.NewRouter().SkipClean(true)
	router.NotFoundHandler = systemAuthed
	router.PathPrefix(staticURL).Handler(serveStatic(staticURL, staticDir))
	router.Path("/cacerts").Handler(cacerts(serverConfig.Runtime.ServerCA))
	router.Path("/ping").Handler(ping())

	return router
}
// apiserver proxies requests to the local kube-apiserver handler, responding
// with 503 Service Unavailable until the apiserver has been started.
func apiserver(runtime *config.ControlRuntime) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		if runtime == nil || runtime.APIServer == nil {
			util.SendError(util.ErrAPINotReady, resp, req, http.StatusServiceUnavailable)
			return
		}
		runtime.APIServer.ServeHTTP(resp, req)
	})
}
// apiserverDisabled unconditionally rejects requests with 503 Service
// Unavailable, for use when this server runs without a local apiserver.
func apiserverDisabled() http.Handler {
	reject := func(resp http.ResponseWriter, req *http.Request) {
		util.SendError(util.ErrAPIDisabled, resp, req, http.StatusServiceUnavailable)
	}
	return http.HandlerFunc(reject)
}
// bootstrapHandler returns a handler for bootstrap data requests. Bootstrap
// data is only served over HTTP when embedded etcd is in use; otherwise the
// request is rejected with 400 Bad Request and a warning is logged.
func bootstrapHandler(runtime *config.ControlRuntime) http.Handler {
	if runtime.HTTPBootstrap {
		return bootstrap.Handler(&runtime.ControlRuntimeBootstrap)
	}
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		logrus.Warnf("Received HTTP bootstrap request from %s, but embedded etcd is not enabled.", req.RemoteAddr)
		util.SendError(errors.New("etcd disabled"), resp, req, http.StatusBadRequest)
	})
}
// cacerts returns a handler that serves the server CA certificate bundle as
// plain text. The file is read lazily on first use and cached for subsequent
// requests; if the read fails, the error is returned and the read is retried
// on the next request.
//
// The cache is guarded by a mutex: HTTP handlers run concurrently, and the
// original unsynchronized check-then-write of the shared ca variable was a
// data race.
func cacerts(serverCA string) http.Handler {
	var mu sync.Mutex
	var ca []byte
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		mu.Lock()
		if ca == nil {
			b, err := os.ReadFile(serverCA)
			if err != nil {
				mu.Unlock()
				util.SendError(err, resp, req)
				return
			}
			ca = b
		}
		body := ca
		mu.Unlock()
		resp.Header().Set("content-type", "text/plain")
		resp.Write(body)
	})
}
// getNodeInfo extracts the requesting node's name, password, and
// authenticated user from the request. All three must be present; the name
// is lowercased before use.
func getNodeInfo(req *http.Request) (*nodeInfo, error) {
	u, ok := request.UserFrom(req.Context())
	if !ok {
		return nil, errors.New("auth user not set")
	}

	name := req.Header.Get(version.Program + "-Node-Name")
	pass := req.Header.Get(version.Program + "-Node-Password")
	switch {
	case name == "":
		return nil, errors.New("node name not set")
	case pass == "":
		return nil, errors.New("node password not set")
	}

	return &nodeInfo{
		Name:     strings.ToLower(name),
		Password: pass,
		User:     u,
	}, nil
}
// getCACertAndKeys loads and parses a CA certificate chain, the CA private
// key, and a leaf signing key from the given PEM-encoded files. It returns
// the CA certificates, the CA key, and the signing key.
//
// The crypto.Signer type assertions are checked: the original bare
// assertions would panic if either PEM file held a key type that does not
// implement crypto.Signer.
func getCACertAndKeys(caCertFile, caKeyFile, signingKeyFile string) ([]*x509.Certificate, crypto.Signer, crypto.Signer, error) {
	keyBytes, err := os.ReadFile(signingKeyFile)
	if err != nil {
		return nil, nil, nil, err
	}
	key, err := certutil.ParsePrivateKeyPEM(keyBytes)
	if err != nil {
		return nil, nil, nil, err
	}
	signer, ok := key.(crypto.Signer)
	if !ok {
		return nil, nil, nil, fmt.Errorf("key in %s is not a crypto.Signer", signingKeyFile)
	}

	caKeyBytes, err := os.ReadFile(caKeyFile)
	if err != nil {
		return nil, nil, nil, err
	}
	caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
	if err != nil {
		return nil, nil, nil, err
	}
	caSigner, ok := caKey.(crypto.Signer)
	if !ok {
		return nil, nil, nil, fmt.Errorf("key in %s is not a crypto.Signer", caKeyFile)
	}

	caBytes, err := os.ReadFile(caCertFile)
	if err != nil {
		return nil, nil, nil, err
	}
	caCert, err := certutil.ParseCertsPEM(caBytes)
	if err != nil {
		return nil, nil, nil, err
	}

	return caCert, caSigner, signer, nil
}
// servingKubeletCert authenticates the node via the nodePassBootstrapper and
// returns a signed kubelet serving certificate followed by its private key,
// PEM-concatenated in the response body. The certificate includes 127.0.0.1
// plus any addresses from the <program>-Node-IP header as IP SANs, and the
// node name plus "localhost" as DNS SANs.
func servingKubeletCert(server *config.Control, keyFile string, auth nodePassBootstrapper) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		nodeName, errCode, err := auth(req)
		if err != nil {
			util.SendError(err, resp, req, errCode)
			return
		}

		caCerts, caKey, key, err := getCACertAndKeys(server.Runtime.ServerCA, server.Runtime.ServerCAKey, server.Runtime.ServingKubeletKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		ips := []net.IP{net.ParseIP("127.0.0.1")}
		if nodeIP := req.Header.Get(version.Program + "-Node-IP"); nodeIP != "" {
			for _, v := range strings.Split(nodeIP, ",") {
				ip := net.ParseIP(v)
				if ip == nil {
					// Report the offending header value v; the original passed
					// the nil ip, which always rendered as "<nil>".
					util.SendError(fmt.Errorf("invalid node IP address %s", v), resp, req)
					return
				}
				ips = append(ips, ip)
			}
		}

		cert, err := certutil.NewSignedCert(certutil.Config{
			CommonName: nodeName,
			Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
			AltNames: certutil.AltNames{
				DNSNames: []string{nodeName, "localhost"},
				IPs:      ips,
			},
		}, key, caCerts[0], caKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		keyBytes, err := os.ReadFile(keyFile)
		if err != nil {
			// util.SendError for consistency with the other error paths above
			// (the original used http.Error here only).
			util.SendError(err, resp, req, http.StatusInternalServerError)
			return
		}

		resp.Write(util.EncodeCertsPEM(cert, caCerts))
		resp.Write(keyBytes)
	})
}
// clientKubeletCert authenticates the node via the nodePassBootstrapper and
// returns a signed kubelet client certificate followed by its private key,
// PEM-concatenated in the response body. The certificate identifies the node
// as system:node:<name> in the system:nodes group.
func clientKubeletCert(server *config.Control, keyFile string, auth nodePassBootstrapper) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		nodeName, errCode, err := auth(req)
		if err != nil {
			util.SendError(err, resp, req, errCode)
			return
		}

		caCerts, caKey, key, err := getCACertAndKeys(server.Runtime.ClientCA, server.Runtime.ClientCAKey, server.Runtime.ClientKubeletKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		cert, err := certutil.NewSignedCert(certutil.Config{
			CommonName:   "system:node:" + nodeName,
			Organization: []string{user.NodesGroup},
			Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		}, key, caCerts[0], caKey)
		if err != nil {
			util.SendError(err, resp, req)
			return
		}

		keyBytes, err := os.ReadFile(keyFile)
		if err != nil {
			// util.SendError for consistency with the other error paths above
			// (the original used http.Error here only).
			util.SendError(err, resp, req, http.StatusInternalServerError)
			return
		}

		resp.Write(util.EncodeCertsPEM(cert, caCerts))
		resp.Write(keyBytes)
	})
}
// fileHandler serves the contents of one or more files as plain text. A
// single file is served via http.ServeFile; multiple files are read and
// written out back-to-back (e.g. a certificate followed by its key).
func fileHandler(fileName ...string) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		resp.Header().Set("Content-Type", "text/plain")

		// Fast path: delegate a single file to the standard file server.
		if len(fileName) == 1 {
			http.ServeFile(resp, req, fileName[0])
			return
		}

		// Concatenate multiple files into the response body.
		for _, path := range fileName {
			b, err := os.ReadFile(path)
			if err != nil {
				util.SendError(errors.Wrapf(err, "failed to read %s", path), resp, req, http.StatusInternalServerError)
				return
			}
			resp.Write(b)
		}
	})
}
// apiserversHandler returns a list of apiserver addresses as JSON.
// It attempts to merge results from both the apiserver and directly from etcd,
// in case we are recovering from an apiserver outage that rendered the endpoint list unavailable.
func apiserversHandler(server *config.Control) http.Handler {
	collectAddresses := getAddressCollector(server)
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		// Bound address collection so a hung backend cannot stall the request indefinitely.
		ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
		defer cancel()
		endpoints := collectAddresses(ctx)

		resp.Header().Set("content-type", "application/json")
		if err := json.NewEncoder(resp).Encode(endpoints); err != nil {
			util.SendError(errors.Wrap(err, "failed to encode apiserver endpoints"), resp, req, http.StatusInternalServerError)
		}
	})
}
// configHandler serves the server's config.Control struct as JSON, for
// agents to retrieve their configuration.
func configHandler(server *config.Control, cfg *cmds.Server) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		// Startup hooks may read and modify cmds.Server in a goroutine, but as these are copied into
		// config.Control before the startup hooks are called, any modifications need to be sync'd back
		// into the struct before it is sent to agents.
		// At this time we don't sync all the fields, just those known to be touched by startup hooks.
		server.DisableKubeProxy = cfg.DisableKubeProxy

		resp.Header().Set("content-type", "application/json")
		enc := json.NewEncoder(resp)
		if err := enc.Encode(server); err != nil {
			util.SendError(errors.Wrap(err, "failed to encode agent config"), resp, req, http.StatusInternalServerError)
		}
	})
}
// readyzHandler reports server readiness: 200 "ok" once the core controller
// factory has been started, 503 before that.
func readyzHandler(server *config.Control) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		if server.Runtime.Core == nil {
			util.SendError(util.ErrCoreNotReady, resp, req, http.StatusServiceUnavailable)
			return
		}
		data := []byte("ok")
		// Headers must be set before WriteHeader; the original set them
		// afterwards, so Content-Type and Content-Length were silently dropped.
		resp.Header().Set("Content-Type", "text/plain")
		resp.Header().Set("Content-Length", strconv.Itoa(len(data)))
		resp.WriteHeader(http.StatusOK)
		resp.Write(data)
	})
}
func ping() http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
data := []byte("pong")
resp.WriteHeader(http.StatusOK)
resp.Header().Set("Content-Type", "text/plain")
resp.Header().Set("Content-Length", strconv.Itoa(len(data)))
resp.Write(data)
})
}
func serveStatic(urlPrefix, staticDir string) http.Handler {
return http.StripPrefix(urlPrefix, http.FileServer(http.Dir(staticDir)))
}
// nodePassBootstrapper authenticates a node join request, returning the
// validated node name on success, or an HTTP status code and error on failure.
type nodePassBootstrapper func(req *http.Request) (string, int, error)

// nodeInfo contains information on the requesting node, derived from auth creds
// and request headers.
type nodeInfo struct {
	Name     string    // lowercased node name from the request header
	Password string    // node password from the request header
	User     user.Info // authenticated user from the request context
}
// passwordBootstrap returns a nodePassBootstrapper that validates node join
// requests against node password secrets, handling the startup window where
// the core controller clients are not yet available by verifying locally or
// deferring verification, depending on the node and server configuration.
func passwordBootstrap(ctx context.Context, config *Config) nodePassBootstrapper {
	runtime := config.ControlConfig.Runtime
	// deferredNodes tracks nodes whose password verification was deferred;
	// mu guards it and the lazily-initialized clients below.
	deferredNodes := map[string]bool{}
	var secretClient coreclient.SecretController
	var nodeClient coreclient.NodeController
	var mu sync.Mutex

	return nodePassBootstrapper(func(req *http.Request) (string, int, error) {
		node, err := getNodeInfo(req)
		if err != nil {
			return "", http.StatusBadRequest, err
		}

		// If the request authenticated with Node Identity credentials, the
		// header node name must agree with the authenticated identity.
		nodeName, isNodeAuth := identifier.NodeIdentity(node.User)
		if isNodeAuth && nodeName != node.Name {
			return "", http.StatusBadRequest, errors.New("header node name does not match auth node name")
		}

		if secretClient == nil || nodeClient == nil {
			if runtime.Core != nil {
				// initialize the client if we can
				secretClient = runtime.Core.Core().V1().Secret()
				nodeClient = runtime.Core.Core().V1().Node()
			} else if node.Name == os.Getenv("NODE_NAME") {
				// If we're verifying our own password, verify it locally and ensure a secret later.
				return verifyLocalPassword(ctx, config, &mu, deferredNodes, node)
			} else if config.ControlConfig.DisableAPIServer && !isNodeAuth {
				// If we're running on an etcd-only node, and the request didn't use Node Identity auth,
				// defer node password verification until an apiserver joins the cluster.
				return verifyRemotePassword(ctx, config, &mu, deferredNodes, node)
			} else {
				// Otherwise, reject the request until the core is ready.
				return "", http.StatusServiceUnavailable, util.ErrCoreNotReady
			}
		}

		// verify that the node exists, if using Node Identity auth
		if err := verifyNode(ctx, nodeClient, node); err != nil {
			return "", http.StatusUnauthorized, err
		}

		// verify that the node password secret matches, or create it if it does not
		if err := nodepassword.Ensure(secretClient, node.Name, node.Password); err != nil {
			// if the verification failed, reject the request
			if errors.Is(err, nodepassword.ErrVerifyFailed) {
				return "", http.StatusForbidden, err
			}
			// If verification failed due to an error creating the node password secret, allow
			// the request, but retry verification until the outage is resolved. This behavior
			// allows nodes to join the cluster during outages caused by validating webhooks
			// blocking secret creation - if the outage requires new nodes to join in order to
			// run the webhook pods, we must fail open here to resolve the outage.
			return verifyRemotePassword(ctx, config, &mu, deferredNodes, node)
		}

		return node.Name, http.StatusOK, nil
	})
}
// verifyLocalPassword checks the submitted node password against the password
// file maintained by the local agent, and defers creation of the node password
// secret to a background goroutine.
func verifyLocalPassword(ctx context.Context, config *Config, mu *sync.Mutex, deferredNodes map[string]bool, node *nodeInfo) (string, int, error) {
	// Nothing to verify if the local host is not running an agent and does not
	// have a node resource.
	if config.DisableAgent {
		return node.Name, http.StatusOK, nil
	}

	// Read from the same password file location that the agent creates.
	passwordRoot := "/"
	if config.ControlConfig.Rootless {
		passwordRoot = filepath.Join(path.Dir(config.ControlConfig.DataDir), "agent")
	}
	passwordFile := filepath.Join(passwordRoot, "etc", "rancher", "node", "password")

	fileBytes, err := os.ReadFile(passwordFile)
	if err != nil {
		return "", http.StatusInternalServerError, errors.Wrap(err, "unable to read node password file")
	}

	// Hash the local password, then verify the submitted password against it.
	localHash, err := nodepassword.Hasher.CreateHash(strings.TrimSpace(string(fileBytes)))
	if err != nil {
		return "", http.StatusInternalServerError, errors.Wrap(err, "unable to hash node password file")
	}
	if err := nodepassword.Hasher.VerifyHash(localHash, node.Password); err != nil {
		return "", http.StatusForbidden, errors.Wrap(err, "unable to verify local node password")
	}

	mu.Lock()
	defer mu.Unlock()

	// Kick off secret creation in the background, at most once per node.
	if !deferredNodes[node.Name] {
		deferredNodes[node.Name] = true
		go ensureSecret(ctx, config, node)
		logrus.Infof("Password verified locally for node %s", node.Name)
	}

	return node.Name, http.StatusOK, nil
}
// verifyRemotePassword allows the node to join immediately, deferring password
// secret validation to a background goroutine (at most one per node).
func verifyRemotePassword(ctx context.Context, config *Config, mu *sync.Mutex, deferredNodes map[string]bool, node *nodeInfo) (string, int, error) {
	mu.Lock()
	defer mu.Unlock()

	if !deferredNodes[node.Name] {
		deferredNodes[node.Name] = true
		go ensureSecret(ctx, config, node)
		logrus.Infof("Password verification deferred for node %s", node.Name)
	}

	return node.Name, http.StatusOK, nil
}
// verifyNode confirms that a Node resource exists for requests that
// authenticated with Node Identity credentials; other requests pass through.
func verifyNode(ctx context.Context, nodeClient coreclient.NodeController, node *nodeInfo) error {
	nodeName, isNodeAuth := identifier.NodeIdentity(node.User)
	if !isNodeAuth {
		return nil
	}
	if _, err := nodeClient.Cache().Get(nodeName); err != nil {
		return errors.Wrap(err, "unable to verify node identity")
	}
	return nil
}
// ensureSecret polls until the core controllers are available, then validates
// (or creates) the node password secret for the given node. The outcome is
// reported as an event attached to the Node object.
func ensureSecret(ctx context.Context, config *Config, node *nodeInfo) {
	runtime := config.ControlConfig.Runtime
	// Poll error is intentionally ignored; polling simply stops on context cancel.
	_ = wait.PollUntilContextCancel(ctx, time.Second*5, true, func(ctx context.Context) (bool, error) {
		if runtime.Core != nil {
			secretClient := runtime.Core.Core().V1().Secret()
			// This is consistent with events attached to the node generated by the kubelet
			// https://github.com/kubernetes/kubernetes/blob/612130dd2f4188db839ea5c2dea07a96b0ad8d1c/pkg/kubelet/kubelet.go#L479-L485
			nodeRef := &corev1.ObjectReference{
				Kind:      "Node",
				Name:      node.Name,
				UID:       types.UID(node.Name),
				Namespace: "",
			}
			if err := nodepassword.Ensure(secretClient, node.Name, node.Password); err != nil {
				runtime.Event.Eventf(nodeRef, corev1.EventTypeWarning, "NodePasswordValidationFailed", "Deferred node password secret validation failed: %v", err)
				// Return true to stop polling if the password verification failed; only retry on secret creation errors.
				return errors.Is(err, nodepassword.ErrVerifyFailed), nil
			}
			runtime.Event.Event(nodeRef, corev1.EventTypeNormal, "NodePasswordValidationComplete", "Deferred node password secret validation complete")
			return true, nil
		}
		// Core controllers not up yet; keep polling.
		return false, nil
	})
}
// addressGetter is a common signature for functions that return an address channel.
// Implementations in this file send at most one slice of apiserver addresses,
// then close the channel when the lookup completes.
type addressGetter func(ctx context.Context) <-chan []string
// kubernetesGetter returns a function that returns a channel that can be read
// to get apiserver addresses from the default/kubernetes Endpoints resource.
// The Endpoints client is initialized lazily, once the Kubernetes client is
// available on the runtime. On lookup failure the channel is closed without
// sending any addresses.
func kubernetesGetter(control *config.Control) addressGetter {
	var endpointsClient typedcorev1.EndpointsInterface
	return func(ctx context.Context) <-chan []string {
		ch := make(chan []string, 1)
		go func() {
			if endpointsClient == nil {
				if control.Runtime.K8s != nil {
					endpointsClient = control.Runtime.K8s.CoreV1().Endpoints(metav1.NamespaceDefault)
				}
			}
			if endpointsClient != nil {
				if endpoint, err := endpointsClient.Get(ctx, "kubernetes", metav1.GetOptions{}); err != nil {
					logrus.Debugf("Failed to get apiserver addresses from kubernetes: %v", err)
				} else {
					ch <- util.GetAddresses(endpoint)
				}
			}
			close(ch)
		}()
		return ch
	}
}
// etcdGetter returns a function that returns a channel that can be read to get
// apiserver addresses from etcd. On lookup failure the channel is closed
// without sending any addresses.
func etcdGetter(control *config.Control) addressGetter {
	return func(ctx context.Context) <-chan []string {
		ch := make(chan []string, 1)
		go func() {
			if addresses, err := etcd.GetAPIServerURLsFromETCD(ctx, control); err != nil {
				logrus.Debugf("Failed to get apiserver addresses from etcd: %v", err)
			} else {
				ch <- addresses
			}
			close(ch)
		}()
		return ch
	}
}
// getAddressCollector returns a function that can be called to return
// apiserver addresses from both kubernetes and etcd. Results from the two
// sources are deduplicated; collection stops early if the context is cancelled.
func getAddressCollector(control *config.Control) func(ctx context.Context) []string {
	getFromKubernetes := kubernetesGetter(control)
	getFromEtcd := etcdGetter(control)

	// read from both kubernetes and etcd in parallel, returning the collected results
	return func(ctx context.Context) []string {
		addresses := sets.Set[string]{}
		var recv []string
		k8sCh := getFromKubernetes(ctx)
		k8sOk := true
		etcdCh := getFromEtcd(ctx)
		etcdOk := true

		// Loop until both channels have been closed, folding every received
		// address list into the set.
		for k8sOk || etcdOk {
			select {
			case recv, k8sOk = <-k8sCh:
				addresses.Insert(recv...)
			case recv, etcdOk = <-etcdCh:
				addresses.Insert(recv...)
			case <-ctx.Done():
				return addresses.UnsortedList()
			}
		}
		return addresses.UnsortedList()
	}
}

View File

@ -23,6 +23,7 @@ import (
"github.com/k3s-io/k3s/pkg/nodepassword"
"github.com/k3s-io/k3s/pkg/rootlessports"
"github.com/k3s-io/k3s/pkg/secretsencrypt"
"github.com/k3s-io/k3s/pkg/server/handlers"
"github.com/k3s-io/k3s/pkg/static"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/version"
@ -58,7 +59,7 @@ func StartServer(ctx context.Context, config *Config, cfg *cmds.Server) error {
wg := &sync.WaitGroup{}
wg.Add(len(config.StartupHooks))
config.ControlConfig.Runtime.Handler = router(ctx, config, cfg)
config.ControlConfig.Runtime.Handler = handlers.NewHandler(ctx, &config.ControlConfig, cfg)
config.ControlConfig.Runtime.StartupHooksWg = wg
shArgs := cmds.StartupHookArgs{
@ -346,7 +347,7 @@ func printTokens(config *config.Control) error {
var serverTokenFile string
if config.Runtime.ServerToken != "" {
serverTokenFile = filepath.Join(config.DataDir, "token")
if err := writeToken(config.Runtime.ServerToken, serverTokenFile, config.Runtime.ServerCA); err != nil {
if err := handlers.WriteToken(config.Runtime.ServerToken, serverTokenFile, config.Runtime.ServerCA); err != nil {
return err
}
@ -374,7 +375,7 @@ func printTokens(config *config.Control) error {
return err
}
}
if err := writeToken(config.Runtime.AgentToken, agentTokenFile, config.Runtime.ServerCA); err != nil {
if err := handlers.WriteToken(config.Runtime.AgentToken, agentTokenFile, config.Runtime.ServerCA); err != nil {
return err
}
} else if serverTokenFile != "" {
@ -490,18 +491,6 @@ func printToken(httpsPort int, advertiseIP, prefix, cmd, varName string) {
logrus.Infof("%s %s %s -s https://%s:%d -t ${%s}", prefix, version.Program, cmd, advertiseIP, httpsPort, varName)
}
func writeToken(token, file, certs string) error {
if len(token) == 0 {
return nil
}
token, err := clientaccess.FormatToken(token, certs)
if err != nil {
return err
}
return os.WriteFile(file, []byte(token+"\n"), 0600)
}
func setNoProxyEnv(config *config.Control) error {
splitter := func(c rune) bool {
return c == ','