mirror of https://github.com/portainer/portainer
fix(advance deploy): EE-1141 A standard user can escalate to cluster administrator privileges on Kubernetes (#5325)
* fix(advance deploy): EE-1141 A standard user can escalate to cluster administrator privileges on Kubernetes
* fix(advance deploy): EE-1141 reuse existing token cache when do deployment
* fix: EE-1141 use user's SA token to exec pod command
* fix: EE-1141 stop advanced-deploy or pod-exec if user's SA token is empty
* fix: EE-1141 resolve merge conflicts

Co-authored-by: Simon Meng <simon.meng@portainer.io>
parent 5652bac004
commit 51ef2c2aa9
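The intent of the change is easier to follow when spelled out: before this commit, advanced deployments and pod exec against a local Kubernetes endpoint ran with the cluster-admin service-account token mounted into the Portainer container, regardless of who issued the request. After it, the token is resolved per request from the per-endpoint token cache: administrators keep the admin token, standard users get their own RBAC-restricted service-account token, and the operation is refused when that token is empty. The following is a minimal, self-contained Go sketch of that decision only; Role and tokenStore are illustrative stand-ins, not Portainer's types.

    package main

    import (
        "errors"
        "fmt"
    )

    // Role is a stand-in for the two roles that matter for this fix.
    type Role int

    const (
        StandardUser Role = iota
        Administrator
    )

    // tokenStore is a stand-in for the per-endpoint token cache used in the patch.
    type tokenStore struct {
        adminToken string
        userTokens map[int]string // userID -> service-account token
    }

    // resolveToken mirrors the getToken helpers added to the deployer and the
    // websocket handler: admins keep the cluster-admin token, everyone else gets
    // their own service-account token, and an empty token aborts the operation.
    func (s *tokenStore) resolveToken(userID int, role Role) (string, error) {
        if role == Administrator {
            return s.adminToken, nil
        }
        token, ok := s.userTokens[userID]
        if !ok || token == "" {
            return "", errors.New("can not get a valid user service account token")
        }
        return token, nil
    }

    func main() {
        store := &tokenStore{
            adminToken: "admin-sa-token",
            userTokens: map[int]string{42: "user-42-sa-token"},
        }

        // A standard user only ever receives their own, RBAC-restricted token.
        fmt.Println(store.resolveToken(42, StandardUser))

        // A user without a provisioned service account is rejected instead of
        // silently falling back to the admin token.
        _, err := store.resolveToken(7, StandardUser)
        fmt.Println(err)
    }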
@@ -95,8 +95,8 @@ func initSwarmStackManager(assetsPath string, dataStorePath string, signatureSer
     return exec.NewSwarmStackManager(assetsPath, dataStorePath, signatureService, fileService, reverseTunnelService)
 }
 
-func initKubernetesDeployer(dataStore portainer.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, assetsPath string) portainer.KubernetesDeployer {
-    return exec.NewKubernetesDeployer(dataStore, reverseTunnelService, signatureService, assetsPath)
+func initKubernetesDeployer(kubernetesTokenCacheManager *kubeproxy.TokenCacheManager, kubernetesClientFactory *kubecli.ClientFactory, dataStore portainer.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, assetsPath string) portainer.KubernetesDeployer {
+    return exec.NewKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, assetsPath)
 }
 
 func initJWTService(dataStore portainer.DataStore) (portainer.JWTService, error) {

@@ -408,7 +408,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 
     composeStackManager := initComposeStackManager(*flags.Assets, *flags.Data, reverseTunnelService, proxyManager)
 
-    kubernetesDeployer := initKubernetesDeployer(dataStore, reverseTunnelService, digitalSignatureService, *flags.Assets)
+    kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, digitalSignatureService, *flags.Assets)
 
     if dataStore.IsNew() {
         err = updateSettingsFromFlags(dataStore, flags)
@@ -5,6 +5,9 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
+    "github.com/portainer/portainer/api/http/security"
+    "github.com/portainer/portainer/api/kubernetes/cli"
     "io/ioutil"
     "net/http"
     "net/url"

@@ -20,27 +23,64 @@ import (
 
 // KubernetesDeployer represents a service to deploy resources inside a Kubernetes environment.
 type KubernetesDeployer struct {
-    binaryPath           string
-    dataStore            portainer.DataStore
-    reverseTunnelService portainer.ReverseTunnelService
-    signatureService     portainer.DigitalSignatureService
+    binaryPath                  string
+    dataStore                   portainer.DataStore
+    reverseTunnelService        portainer.ReverseTunnelService
+    signatureService            portainer.DigitalSignatureService
+    kubernetesClientFactory     *cli.ClientFactory
+    kubernetesTokenCacheManager *kubernetes.TokenCacheManager
 }
 
 // NewKubernetesDeployer initializes a new KubernetesDeployer service.
-func NewKubernetesDeployer(datastore portainer.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, binaryPath string) *KubernetesDeployer {
+func NewKubernetesDeployer(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, kubernetesClientFactory *cli.ClientFactory, datastore portainer.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, binaryPath string) *KubernetesDeployer {
     return &KubernetesDeployer{
-        binaryPath:           binaryPath,
-        dataStore:            datastore,
-        reverseTunnelService: reverseTunnelService,
-        signatureService:     signatureService,
+        binaryPath:                  binaryPath,
+        dataStore:                   datastore,
+        reverseTunnelService:        reverseTunnelService,
+        signatureService:            signatureService,
+        kubernetesClientFactory:     kubernetesClientFactory,
+        kubernetesTokenCacheManager: kubernetesTokenCacheManager,
     }
 }
 
+func (deployer *KubernetesDeployer) getToken(request *http.Request, endpoint *portainer.Endpoint, setLocalAdminToken bool) (string, error) {
+    tokenData, err := security.RetrieveTokenData(request)
+    if err != nil {
+        return "", err
+    }
+
+    kubecli, err := deployer.kubernetesClientFactory.GetKubeClient(endpoint)
+    if err != nil {
+        return "", err
+    }
+
+    tokenCache := deployer.kubernetesTokenCacheManager.GetOrCreateTokenCache(int(endpoint.ID))
+
+    tokenManager, err := kubernetes.NewTokenManager(kubecli, deployer.dataStore, tokenCache, setLocalAdminToken)
+    if err != nil {
+        return "", err
+    }
+
+    if tokenData.Role == portainer.AdministratorRole {
+        return tokenManager.GetAdminServiceAccountToken(), nil
+    }
+
+    token, err := tokenManager.GetUserServiceAccountToken(int(tokenData.ID), endpoint.ID)
+    if err != nil {
+        return "", err
+    }
+
+    if token == "" {
+        return "", fmt.Errorf("can not get a valid user service account token")
+    }
+    return token, nil
+}
+
 // Deploy will deploy a Kubernetes manifest inside a specific namespace in a Kubernetes endpoint.
 // Otherwise it will use kubectl to deploy the manifest.
-func (deployer *KubernetesDeployer) Deploy(endpoint *portainer.Endpoint, stackConfig string, namespace string) (string, error) {
+func (deployer *KubernetesDeployer) Deploy(request *http.Request, endpoint *portainer.Endpoint, stackConfig string, namespace string) (string, error) {
     if endpoint.Type == portainer.KubernetesLocalEnvironment {
-        token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
+        token, err := deployer.getToken(request, endpoint, true)
         if err != nil {
             return "", err
         }

@@ -53,7 +93,7 @@ func (deployer *KubernetesDeployer) Deploy(endpoint *portainer.Endpoint, stackCo
         args := make([]string, 0)
         args = append(args, "--server", endpoint.URL)
         args = append(args, "--insecure-skip-tls-verify")
-        args = append(args, "--token", string(token))
+        args = append(args, "--token", token)
         args = append(args, "--namespace", namespace)
         args = append(args, "apply", "-f", "-")
 

@@ -139,8 +179,14 @@ func (deployer *KubernetesDeployer) Deploy(endpoint *portainer.Endpoint, stackCo
         return "", err
     }
 
+    token, err := deployer.getToken(request, endpoint, false)
+    if err != nil {
+        return "", err
+    }
+
     req.Header.Set(portainer.PortainerAgentPublicKeyHeader, deployer.signatureService.EncodedPublicKey())
     req.Header.Set(portainer.PortainerAgentSignatureHeader, signature)
+    req.Header.Set(portainer.PortainerAgentKubernetesSATokenHeader, token)
 
     resp, err := httpCli.Do(req)
     if err != nil {
@@ -95,7 +95,7 @@ func (handler *Handler) createKubernetesStackFromFileContent(w http.ResponseWrit
     doCleanUp := true
     defer handler.cleanUp(stack, &doCleanUp)
 
-    output, err := handler.deployKubernetesStack(endpoint, payload.StackFileContent, payload.ComposeFormat, payload.Namespace)
+    output, err := handler.deployKubernetesStack(r, endpoint, payload.StackFileContent, payload.ComposeFormat, payload.Namespace)
     if err != nil {
         return &httperror.HandlerError{StatusCode: http.StatusInternalServerError, Message: "Unable to deploy Kubernetes stack", Err: err}
     }

@@ -139,7 +139,7 @@ func (handler *Handler) createKubernetesStackFromGitRepository(w http.ResponseWr
         return &httperror.HandlerError{StatusCode: http.StatusInternalServerError, Message: "Failed to process manifest from Git repository", Err: err}
     }
 
-    output, err := handler.deployKubernetesStack(endpoint, stackFileContent, payload.ComposeFormat, payload.Namespace)
+    output, err := handler.deployKubernetesStack(r, endpoint, stackFileContent, payload.ComposeFormat, payload.Namespace)
     if err != nil {
         return &httperror.HandlerError{StatusCode: http.StatusInternalServerError, Message: "Unable to deploy Kubernetes stack", Err: err}
     }

@@ -155,7 +155,7 @@ func (handler *Handler) createKubernetesStackFromGitRepository(w http.ResponseWr
     return response.JSON(w, resp)
 }
 
-func (handler *Handler) deployKubernetesStack(endpoint *portainer.Endpoint, stackConfig string, composeFormat bool, namespace string) (string, error) {
+func (handler *Handler) deployKubernetesStack(request *http.Request, endpoint *portainer.Endpoint, stackConfig string, composeFormat bool, namespace string) (string, error) {
     handler.stackCreationMutex.Lock()
     defer handler.stackCreationMutex.Unlock()
 

@@ -167,7 +167,7 @@ func (handler *Handler) deployKubernetesStack(endpoint *portainer.Endpoint, stac
         stackConfig = string(convertedConfig)
     }
 
-    return handler.KubernetesDeployer.Deploy(endpoint, stackConfig, namespace)
+    return handler.KubernetesDeployer.Deploy(request, endpoint, stackConfig, namespace)
 
 }
 
@@ -5,6 +5,7 @@ import (
     "github.com/gorilla/websocket"
     httperror "github.com/portainer/libhttp/error"
     portainer "github.com/portainer/portainer/api"
+    "github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
     "github.com/portainer/portainer/api/http/security"
     "github.com/portainer/portainer/api/kubernetes/cli"
 )

@@ -12,20 +13,22 @@ import (
 // Handler is the HTTP handler used to handle websocket operations.
 type Handler struct {
     *mux.Router
-    DataStore               portainer.DataStore
-    SignatureService        portainer.DigitalSignatureService
-    ReverseTunnelService    portainer.ReverseTunnelService
-    KubernetesClientFactory *cli.ClientFactory
-    requestBouncer          *security.RequestBouncer
-    connectionUpgrader      websocket.Upgrader
+    DataStore                   portainer.DataStore
+    SignatureService            portainer.DigitalSignatureService
+    ReverseTunnelService        portainer.ReverseTunnelService
+    KubernetesClientFactory     *cli.ClientFactory
+    requestBouncer              *security.RequestBouncer
+    connectionUpgrader          websocket.Upgrader
+    kubernetesTokenCacheManager *kubernetes.TokenCacheManager
 }
 
 // NewHandler creates a handler to manage websocket operations.
-func NewHandler(bouncer *security.RequestBouncer) *Handler {
+func NewHandler(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, bouncer *security.RequestBouncer) *Handler {
     h := &Handler{
         Router: mux.NewRouter(),
         connectionUpgrader: websocket.Upgrader{},
         requestBouncer: bouncer,
+        kubernetesTokenCacheManager: kubernetesTokenCacheManager,
     }
     h.PathPrefix("/websocket/exec").Handler(
         bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.websocketExec)))
@@ -1,6 +1,8 @@
 package websocket
 
 import (
+    "fmt"
+    "github.com/portainer/portainer/api/http/security"
     "io"
     "log"
     "net/http"

@@ -11,6 +13,7 @@ import (
     "github.com/portainer/libhttp/request"
     portainer "github.com/portainer/portainer/api"
     bolterrors "github.com/portainer/portainer/api/bolt/errors"
+    "github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
 )
 
 // @summary Execute a websocket on pod

@@ -70,8 +73,14 @@ func (handler *Handler) websocketPodExec(w http.ResponseWriter, r *http.Request)
         return &httperror.HandlerError{http.StatusForbidden, "Permission denied to access endpoint", err}
     }
 
+    token, useAdminToken, err := handler.getToken(r, endpoint, false)
+    if err != nil {
+        return &httperror.HandlerError{http.StatusInternalServerError, "Unable to get user service account token", err}
+    }
+
     params := &webSocketRequestParams{
         endpoint: endpoint,
+        token:    token,
     }
 
     r.Header.Del("Origin")

@@ -112,7 +121,7 @@ func (handler *Handler) websocketPodExec(w http.ResponseWriter, r *http.Request)
         return &httperror.HandlerError{http.StatusInternalServerError, "Unable to create Kubernetes client", err}
     }
 
-    err = cli.StartExecProcess(namespace, podName, containerName, commandArray, stdinReader, stdoutWriter)
+    err = cli.StartExecProcess(token, useAdminToken, namespace, podName, containerName, commandArray, stdinReader, stdoutWriter)
     if err != nil {
         return &httperror.HandlerError{http.StatusInternalServerError, "Unable to start exec process inside container", err}
     }

@@ -124,3 +133,37 @@ func (handler *Handler) websocketPodExec(w http.ResponseWriter, r *http.Request)
 
     return nil
 }
+
+func (handler *Handler) getToken(request *http.Request, endpoint *portainer.Endpoint, setLocalAdminToken bool) (string, bool, error) {
+    tokenData, err := security.RetrieveTokenData(request)
+    if err != nil {
+        return "", false, err
+    }
+
+    kubecli, err := handler.KubernetesClientFactory.GetKubeClient(endpoint)
+    if err != nil {
+        return "", false, err
+    }
+
+    tokenCache := handler.kubernetesTokenCacheManager.GetOrCreateTokenCache(int(endpoint.ID))
+
+    tokenManager, err := kubernetes.NewTokenManager(kubecli, handler.DataStore, tokenCache, setLocalAdminToken)
+    if err != nil {
+        return "", false, err
+    }
+
+    if tokenData.Role == portainer.AdministratorRole {
+        return tokenManager.GetAdminServiceAccountToken(), true, nil
+    }
+
+    token, err := tokenManager.GetUserServiceAccountToken(int(tokenData.ID), endpoint.ID)
+    if err != nil {
+        return "", false, err
+    }
+
+    if token == "" {
+        return "", false, fmt.Errorf("can not get a valid user service account token")
+    }
+
+    return token, false, nil
+}
@@ -24,6 +24,7 @@ func (handler *Handler) proxyEdgeAgentWebsocketRequest(w http.ResponseWriter, r
 
     proxy.Director = func(incoming *http.Request, out http.Header) {
         out.Set(portainer.PortainerAgentTargetHeader, params.nodeName)
+        out.Set(portainer.PortainerAgentKubernetesSATokenHeader, params.token)
     }
 
     handler.ReverseTunnelService.SetTunnelStatusToActive(params.endpoint.ID)

@@ -64,6 +65,7 @@ func (handler *Handler) proxyAgentWebsocketRequest(w http.ResponseWriter, r *htt
         out.Set(portainer.PortainerAgentPublicKeyHeader, handler.SignatureService.EncodedPublicKey())
         out.Set(portainer.PortainerAgentSignatureHeader, signature)
         out.Set(portainer.PortainerAgentTargetHeader, params.nodeName)
+        out.Set(portainer.PortainerAgentKubernetesSATokenHeader, params.token)
     }
 
     proxy.ServeHTTP(w, r)
@@ -8,4 +8,5 @@ type webSocketRequestParams struct {
     ID       string
     nodeName string
     endpoint *portainer.Endpoint
+    token    string
 }
@@ -1,10 +1,8 @@
 package kubernetes
 
 import (
-    "io/ioutil"
-    "sync"
-
     portainer "github.com/portainer/portainer/api"
+    "io/ioutil"
 )
 
 const defaultServiceAccountTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"

@@ -13,7 +11,6 @@ type tokenManager struct {
     tokenCache *tokenCache
     kubecli    portainer.KubeClient
     dataStore  portainer.DataStore
-    mutex      sync.Mutex
     adminToken string
 }
 

@@ -25,7 +22,6 @@ func NewTokenManager(kubecli portainer.KubeClient, dataStore portainer.DataStore
         tokenCache: cache,
         kubecli:    kubecli,
         dataStore:  dataStore,
-        mutex:      sync.Mutex{},
         adminToken: "",
     }
 

@@ -41,13 +37,13 @@ func NewTokenManager(kubecli portainer.KubeClient, dataStore portainer.DataStore
     return tokenManager, nil
 }
 
-func (manager *tokenManager) getAdminServiceAccountToken() string {
+func (manager *tokenManager) GetAdminServiceAccountToken() string {
     return manager.adminToken
 }
 
-func (manager *tokenManager) getUserServiceAccountToken(userID int, endpointID portainer.EndpointID) (string, error) {
-    manager.mutex.Lock()
-    defer manager.mutex.Unlock()
+func (manager *tokenManager) GetUserServiceAccountToken(userID int, endpointID portainer.EndpointID) (string, error) {
+    manager.tokenCache.mutex.Lock()
+    defer manager.tokenCache.mutex.Unlock()
 
     token, ok := manager.tokenCache.getToken(userID)
     if !ok {
@@ -2,6 +2,7 @@ package kubernetes
 
 import (
+    "strconv"
     "sync"
 
     "github.com/orcaman/concurrent-map"
 )

@@ -14,6 +15,7 @@ type (
 
     tokenCache struct {
         userTokenCache cmap.ConcurrentMap
+        mutex          sync.Mutex
     }
 )
 

@@ -35,6 +37,18 @@ func (manager *TokenCacheManager) CreateTokenCache(endpointID int) *tokenCache {
     return tokenCache
 }
 
+// GetOrCreateTokenCache will get the tokenCache from the manager map of caches if it exists,
+// otherwise it will create a new tokenCache object, associate it to the manager map of caches
+// and return a pointer to that tokenCache instance.
+func (manager *TokenCacheManager) GetOrCreateTokenCache(endpointID int) *tokenCache {
+    key := strconv.Itoa(endpointID)
+    if epCache, ok := manager.tokenCaches.Get(key); ok {
+        return epCache.(*tokenCache)
+    }
+
+    return manager.CreateTokenCache(endpointID)
+}
+
 // RemoveUserFromCache will ensure that the specific userID is removed from all registered caches.
 func (manager *TokenCacheManager) RemoveUserFromCache(userID int) {
     for cache := range manager.tokenCaches.IterBuffered() {

@@ -45,6 +59,7 @@ func (manager *TokenCacheManager) RemoveUserFromCache(userID int) {
 func newTokenCache() *tokenCache {
     return &tokenCache{
         userTokenCache: cmap.New(),
+        mutex:          sync.Mutex{},
     }
 }
 
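GetOrCreateTokenCache is what lets the deployer reuse a token cache that the Kubernetes proxy already populated for the same endpoint instead of minting a second one, which is the "reuse existing token cache" part of the commit message. The pattern is an ordinary get-or-create keyed by endpoint ID; below is a minimal sketch using the standard library's sync.Map rather than the concurrent-map package the real file uses, with illustrative names.

    package main

    import (
        "fmt"
        "sync"
    )

    // tokenCache is a stand-in for the per-endpoint cache of user tokens.
    type tokenCache struct {
        userTokens sync.Map // userID (int) -> token (string)
    }

    // cacheManager holds one tokenCache per endpoint ID.
    type cacheManager struct {
        caches sync.Map // endpointID (int) -> *tokenCache
    }

    // getOrCreate returns the existing cache for an endpoint, or atomically
    // registers a fresh one, so every code path shares the same cache instance.
    func (m *cacheManager) getOrCreate(endpointID int) *tokenCache {
        actual, _ := m.caches.LoadOrStore(endpointID, &tokenCache{})
        return actual.(*tokenCache)
    }

    func main() {
        m := &cacheManager{}
        a := m.getOrCreate(1)
        b := m.getOrCreate(1) // reused, not recreated
        fmt.Println(a == b)   // true
    }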
@@ -110,9 +110,9 @@ func (transport *baseTransport) getRoundTripToken(request *http.Request, tokenMa
 
     var token string
     if tokenData.Role == portainer.AdministratorRole {
-        token = tokenManager.getAdminServiceAccountToken()
+        token = tokenManager.GetAdminServiceAccountToken()
     } else {
-        token, err = tokenManager.getUserServiceAccountToken(int(tokenData.ID), transport.endpoint.ID)
+        token, err = tokenManager.GetUserServiceAccountToken(int(tokenData.ID), transport.endpoint.ID)
         if err != nil {
             log.Printf("Failed retrieving service account token: %v", err)
             return "", err
@@ -202,7 +202,7 @@ func (server *Server) Start() error {
     userHandler.DataStore = server.DataStore
     userHandler.CryptoService = server.CryptoService
 
-    var websocketHandler = websocket.NewHandler(requestBouncer)
+    var websocketHandler = websocket.NewHandler(server.KubernetesTokenCacheManager, requestBouncer)
     websocketHandler.DataStore = server.DataStore
     websocketHandler.SignatureService = server.SignatureService
     websocketHandler.ReverseTunnelService = server.ReverseTunnelService
@@ -14,13 +14,18 @@ import (
 // StartExecProcess will start an exec process inside a container located inside a pod inside a specific namespace
 // using the specified command. The stdin parameter will be bound to the stdin process and the stdout process will write
 // to the stdout parameter.
-// This function only works against a local endpoint using an in-cluster config.
-func (kcl *KubeClient) StartExecProcess(namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer) error {
+// This function only works against a local endpoint using an in-cluster config with the user's SA token.
+func (kcl *KubeClient) StartExecProcess(token string, useAdminToken bool, namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer) error {
     config, err := rest.InClusterConfig()
     if err != nil {
         return err
     }
 
+    if !useAdminToken {
+        config.BearerToken = token
+        config.BearerTokenFile = ""
+    }
+
     req := kcl.cli.CoreV1().RESTClient().
         Post().
         Resource("pods").
|
|||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
gittypes "github.com/portainer/portainer/api/git/types"
|
||||
|
@ -1173,7 +1174,7 @@ type (
|
|||
KubeClient interface {
|
||||
SetupUserServiceAccount(userID int, teamIDs []int, restrictDefaultNamespace bool) error
|
||||
GetServiceAccountBearerToken(userID int) (string, error)
|
||||
StartExecProcess(namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer) error
|
||||
StartExecProcess(token string, useAdminToken bool, namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer) error
|
||||
NamespaceAccessPoliciesDeleteNamespace(namespace string) error
|
||||
GetNamespaceAccessPolicies() (map[string]K8sNamespaceAccessPolicy, error)
|
||||
UpdateNamespaceAccessPolicies(accessPolicies map[string]K8sNamespaceAccessPolicy) error
|
||||
|
@ -1184,7 +1185,7 @@ type (
|
|||
|
||||
// KubernetesDeployer represents a service to deploy a manifest inside a Kubernetes endpoint
|
||||
KubernetesDeployer interface {
|
||||
Deploy(endpoint *Endpoint, data string, namespace string) (string, error)
|
||||
Deploy(request *http.Request, endpoint *Endpoint, data string, namespace string) (string, error)
|
||||
ConvertCompose(data string) ([]byte, error)
|
||||
}
|
||||
|
||||
|
|