portainer/api/kubernetes/cli/pod.go

package cli

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/pkg/errors"
	portainer "github.com/portainer/portainer/api"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const shellPodImage = "portainer/kubectl-shell"
// CreateUserShellPod creates a kubectl-based shell pod for the specified user by mounting their respective service account.
// The lifecycle of the pod is managed in this function; this entails the following pod operations:
// - The shell pod is scoped to the specified service account's access permissions
// - The shell pod is automatically removed if it is not ready after a specified period of time
// - The shell pod is automatically removed after a specified maximum lifetime (to prevent zombie pods)
// - The shell pod is automatically removed if the request is cancelled (or the client closes the websocket connection)
func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountName string) (*portainer.KubernetesShellPod, error) {
	// Schedule the pod for automatic removal
	maxPodKeepAlive := 1 * time.Hour
	maxPodKeepAliveSecondsStr := fmt.Sprintf("%d", int(maxPodKeepAlive.Seconds()))

	podPrefix := userShellPodPrefix(serviceAccountName)

	podSpec := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podPrefix,
			Namespace:    portainerNamespace,
			Annotations: map[string]string{
				"kubernetes.io/pod.type": "kubectl-shell",
			},
		},
		Spec: v1.PodSpec{
			TerminationGracePeriodSeconds: new(int64),
			ServiceAccountName:            serviceAccountName,
			Containers: []v1.Container{
				{
					Name:    "kubectl-shell-container",
					Image:   shellPodImage,
					Command: []string{"sleep"},
					// Specify sleep time to prevent zombie pods in case the Portainer process is terminated
					Args:            []string{maxPodKeepAliveSecondsStr},
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	shellPod, err := kcl.cli.CoreV1().Pods(portainerNamespace).Create(podSpec)
	if err != nil {
		return nil, errors.Wrap(err, "error creating shell pod")
	}

	// Wait for the pod to reach the ready state
	timeoutCtx, cancelFunc := context.WithTimeout(ctx, 20*time.Second)
	defer cancelFunc()
	err = kcl.waitForPodStatus(timeoutCtx, v1.PodRunning, shellPod)
	if err != nil {
		kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
		return nil, errors.Wrap(err, "aborting pod creation; error waiting for shell pod ready status")
	}

	if len(shellPod.Spec.Containers) != 1 {
		kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
		return nil, fmt.Errorf("incorrect shell pod state, expecting single container to be present")
	}

	podData := &portainer.KubernetesShellPod{
		Namespace:        shellPod.Namespace,
		PodName:          shellPod.Name,
		ContainerName:    shellPod.Spec.Containers[0].Name,
		ShellExecCommand: "env COLUMNS=200 /bin/bash", // env COLUMNS dictates the minimum width of the shell
	}

	// Handle pod lifecycle/cleanup - terminate the pod after maxPodKeepAlive or upon cancellation of the (long-lived) request
	go func() {
		select {
		case <-time.After(maxPodKeepAlive):
			log.Println("[DEBUG] [internal,kubernetes/pod] [message: pod removal schedule duration exceeded]")
			kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
		case <-ctx.Done():
			err := ctx.Err()
			log.Printf("[DEBUG] [internal,kubernetes/pod] [message: context error: err=%s ]\n", err)
			kcl.cli.CoreV1().Pods(portainerNamespace).Delete(shellPod.Name, nil)
		}
	}()

	return podData, nil
}
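
// A minimal caller sketch (illustrative only, not part of this file): a websocket
// handler would typically pass the request context into CreateUserShellPod so the
// cleanup goroutine above deletes the shell pod as soon as the client disconnects.
// The serveShellSession name and the execIntoPod helper below are hypothetical.
//
//	func serveShellSession(ctx context.Context, kcl *KubeClient, serviceAccount string) error {
//		shellPod, err := kcl.CreateUserShellPod(ctx, serviceAccount)
//		if err != nil {
//			return err
//		}
//		// Exec shellPod.ShellExecCommand in shellPod.ContainerName (namespace
//		// shellPod.Namespace) and stream the terminal over the websocket.
//		return execIntoPod(ctx, shellPod) // hypothetical exec helper
//	}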
// waitForPodStatus waits, until the provided context is cancelled or times out, for a pod to reach the given phase/status.
// The pod status is polled at a fixed delay until the pod reaches that phase.
func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase v1.PodPhase, pod *v1.Pod) error {
	log.Printf("[DEBUG] [internal,kubernetes/pod] [message: waiting for pod ready: pod=%s... ]\n", pod.Name)

	pollDelay := 500 * time.Millisecond
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			pod, err := kcl.cli.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}

			if pod.Status.Phase == phase {
				return nil
			}

			<-time.After(pollDelay)
		}
	}
}
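
// A minimal test sketch for the polling helper above, assuming the cli field is a
// kubernetes.Interface so the fake clientset from k8s.io/client-go/kubernetes/fake
// can stand in for a real cluster. It would live in a _test.go file with the
// "testing" and fake imports; the test name is hypothetical.
//
//	func TestWaitForPodStatus(t *testing.T) {
//		pod := &v1.Pod{
//			ObjectMeta: metav1.ObjectMeta{Name: "kubectl-shell-test", Namespace: portainerNamespace},
//			Status:     v1.PodStatus{Phase: v1.PodRunning},
//		}
//		kcl := &KubeClient{cli: fake.NewSimpleClientset(pod)}
//
//		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//		defer cancel()
//
//		if err := kcl.waitForPodStatus(ctx, v1.PodRunning, pod); err != nil {
//			t.Fatalf("expected pod to reach running phase: %v", err)
//		}
//	}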