package util

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/pkg/errors"
	"github.com/rancher/wrangler/v3/pkg/merr"
	"github.com/rancher/wrangler/v3/pkg/schemes"
	"github.com/sirupsen/logrus"
	authorizationv1 "k8s.io/api/authorization/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1"
	coregetter "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
)

// DefaultAPIServerReadyTimeout is the default duration to wait for the apiserver to become ready.
// It is primarily used to block startup of agent supervisor controllers until the apiserver is ready
// to serve requests, in the same way that the apiReady channel is used in the server packages, so it
// can be fairly long. It must be at least long enough for downstream projects like RKE2 to start the
// apiserver in the background.
const DefaultAPIServerReadyTimeout = 15 * time.Minute

// GetAddresses returns a list of "host:port" addresses for the given Endpoints. If a subset does
// not specify a port, 443 is assumed.
func GetAddresses(endpoint *v1.Endpoints) []string {
	serverAddresses := []string{}
	if endpoint == nil {
		return serverAddresses
	}
	for _, subset := range endpoint.Subsets {
		var port string
		if len(subset.Ports) > 0 {
			port = strconv.Itoa(int(subset.Ports[0].Port))
		}
		if port == "" {
			port = "443"
		}
		for _, address := range subset.Addresses {
			serverAddresses = append(serverAddresses, net.JoinHostPort(address.IP, port))
		}
	}
	return serverAddresses
}
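
// Illustrative usage sketch (not from the original source): a caller with a clientset could use
// GetAddresses to turn the default/kubernetes Endpoints into "host:port" supervisor addresses.
// The client variable and lookup below are assumptions for the example only.
//
//	endpoint, err := client.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{})
//	if err == nil {
//		addresses := util.GetAddresses(endpoint) // e.g. ["10.43.0.1:443"]
//		logrus.Infof("apiserver addresses: %v", addresses)
//	}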

// WaitForAPIServerReady waits for the API Server's /readyz endpoint to report "ok", with a timeout.
// This is modified from WaitForAPIServer from the Kubernetes controller-manager app, but checks the
// readyz endpoint instead of the deprecated healthz endpoint, and supports context.
func WaitForAPIServerReady(ctx context.Context, kubeconfigPath string, timeout time.Duration) error {
	var lastErr error
	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return err
	}

	// By default, idle connections to the apiserver are returned to a global pool
	// between requests. Explicitly flag this client's requests for closure so that
	// we re-dial through the loadbalancer in case the endpoints have changed.
	restConfig.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return roundTripFunc(func(req *http.Request) (*http.Response, error) {
			req.Close = true
			return rt.RoundTrip(req)
		})
	})

	restConfig = dynamic.ConfigFor(restConfig)
	restConfig.GroupVersion = &schema.GroupVersion{}
	restClient, err := rest.RESTClientFor(restConfig)
	if err != nil {
		return err
	}

	err = wait.PollImmediateWithContext(ctx, time.Second, timeout, func(ctx context.Context) (bool, error) {
		healthStatus := 0
		result := restClient.Get().AbsPath("/readyz").Do(ctx).StatusCode(&healthStatus)
		if rerr := result.Error(); rerr != nil {
			lastErr = errors.Wrap(rerr, "failed to get apiserver /readyz status")
			return false, nil
		}
		if healthStatus != http.StatusOK {
			content, _ := result.Raw()
			lastErr = fmt.Errorf("APIServer isn't ready: %v", string(content))
			logrus.Warnf("APIServer isn't ready yet: %v. Waiting a little while.", string(content))
			return false, nil
		}

		return true, nil
	})

	if err != nil {
		return merr.NewErrors(err, lastErr)
	}

	return nil
}
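
// Illustrative usage sketch (not from the original source): blocking a controller's startup until
// the apiserver answers /readyz, using the default timeout. The kubeconfig path is a hypothetical
// placeholder.
//
//	if err := util.WaitForAPIServerReady(ctx, "/path/to/kubeconfig", util.DefaultAPIServerReadyTimeout); err != nil {
//		logrus.Fatalf("apiserver did not become ready: %v", err)
//	}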

type genericAccessReviewRequest func(context.Context) (*authorizationv1.SubjectAccessReviewStatus, error)

// WaitForRBACReady polls an AccessReview request until it returns an allowed response. If the user
// and groups are empty, it uses SelfSubjectAccessReview; otherwise SubjectAccessReview is used. It
// returns an error if the timeout expires, or nil if the SubjectAccessReviewStatus indicates that
// access would be allowed.
func WaitForRBACReady(ctx context.Context, kubeconfigPath string, timeout time.Duration, ra authorizationv1.ResourceAttributes, user string, groups ...string) error {
	var lastErr error
	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return err
	}
	authClient, err := authorizationv1client.NewForConfig(restConfig)
	if err != nil {
		return err
	}

	var reviewFunc genericAccessReviewRequest
	if len(user) == 0 && len(groups) == 0 {
		reviewFunc = selfSubjectAccessReview(authClient, ra)
	} else {
		reviewFunc = subjectAccessReview(authClient, ra, user, groups)
	}

	err = wait.PollImmediateWithContext(ctx, time.Second, timeout, func(ctx context.Context) (bool, error) {
		status, rerr := reviewFunc(ctx)
		if rerr != nil {
			lastErr = rerr
			return false, nil
		}
		if status.Allowed {
			return true, nil
		}
		lastErr = errors.New(status.Reason)
		return false, nil
	})

	if err != nil {
		return merr.NewErrors(err, lastErr)
	}

	return nil
}
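
// Illustrative usage sketch (not from the original source): waiting until the kubeconfig's own
// identity is allowed to list nodes. Passing an empty user and no groups selects the
// SelfSubjectAccessReview path; the attributes and timeout are assumptions for the example only.
//
//	ra := authorizationv1.ResourceAttributes{Verb: "list", Resource: "nodes"}
//	if err := util.WaitForRBACReady(ctx, "/path/to/kubeconfig", time.Minute, ra, ""); err != nil {
//		return errors.Wrap(err, "RBAC not ready for node list")
//	}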

// selfSubjectAccessReview returns a function that makes SelfSubjectAccessReview requests using the
// provided client and attributes, returning a status or error.
func selfSubjectAccessReview(authClient *authorizationv1client.AuthorizationV1Client, ra authorizationv1.ResourceAttributes) genericAccessReviewRequest {
	return func(ctx context.Context) (*authorizationv1.SubjectAccessReviewStatus, error) {
		r, err := authClient.SelfSubjectAccessReviews().Create(ctx, &authorizationv1.SelfSubjectAccessReview{
			Spec: authorizationv1.SelfSubjectAccessReviewSpec{
				ResourceAttributes: &ra,
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return nil, err
		}
		return &r.Status, nil
	}
}

// subjectAccessReview returns a function that makes SubjectAccessReview requests using the
// provided client, attributes, user, and groups, returning a status or error.
func subjectAccessReview(authClient *authorizationv1client.AuthorizationV1Client, ra authorizationv1.ResourceAttributes, user string, groups []string) genericAccessReviewRequest {
	return func(ctx context.Context) (*authorizationv1.SubjectAccessReviewStatus, error) {
		r, err := authClient.SubjectAccessReviews().Create(ctx, &authorizationv1.SubjectAccessReview{
			Spec: authorizationv1.SubjectAccessReviewSpec{
				ResourceAttributes: &ra,
				User:               user,
				Groups:             groups,
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return nil, err
		}
		return &r.Status, nil
	}
}

// BuildControllerEventRecorder returns an EventRecorder that records events for the named controller
// in the given namespace, attributed to the node named by the NODE_NAME environment variable.
func BuildControllerEventRecorder(k8s clientset.Interface, controllerName, namespace string) record.EventRecorder {
	logrus.Infof("Creating %s event broadcaster", controllerName)
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartStructuredLogging(0)
	eventBroadcaster.StartRecordingToSink(&coregetter.EventSinkImpl{Interface: k8s.CoreV1().Events(namespace)})
	nodeName := os.Getenv("NODE_NAME")
	return eventBroadcaster.NewRecorder(schemes.All, v1.EventSource{Component: controllerName, Host: nodeName})
}
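
// Illustrative usage sketch (not from the original source): building a recorder for a controller
// and emitting an event against an object. The controller name and node object are assumptions
// for the example only; NODE_NAME must be set in the environment for the Host field to be filled.
//
//	recorder := util.BuildControllerEventRecorder(k8s, "my-controller", metav1.NamespaceDefault)
//	recorder.Event(node, v1.EventTypeNormal, "Started", "controller started")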

// roundTripFunc adapts a plain function to the http.RoundTripper interface.
type roundTripFunc func(req *http.Request) (*http.Response, error)

func (w roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return w(req)
}