package cli

import (
	"fmt"
	"net/http"
	"strconv"
	"sync"
	"time"

	"github.com/patrickmn/go-cache"
	"github.com/pkg/errors"
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)
const (
	DefaultKubeClientQPS   = 30
	DefaultKubeClientBurst = 100
)
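// The QPS and Burst values above raise client-go's defaults (5 QPS, 10 burst)
// so that Portainer's concurrent resource listings are not throttled
// client-side.
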
type (
// ClientFactory is used to create Kubernetes clients
ClientFactory struct {
dataStore dataservices.DataStore
reverseTunnelService portainer.ReverseTunnelService
signatureService portainer.DigitalSignatureService
instanceID string
endpointClients map[string]*KubeClient
endpointProxyClients *cache.Cache
AddrHTTPS string
mu sync.Mutex
}
	// KubeClient represents a service used to execute Kubernetes operations
	KubeClient struct {
		cli        kubernetes.Interface
		instanceID string
	}
)

// NewClientFactory returns a new instance of a ClientFactory
func NewClientFactory(signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService, dataStore dataservices.DataStore, instanceID, addrHTTPS, userSessionTimeout string) (*ClientFactory, error) {
	if userSessionTimeout == "" {
		userSessionTimeout = portainer.DefaultUserSessionTimeout
	}

	timeout, err := time.ParseDuration(userSessionTimeout)
	if err != nil {
		return nil, err
	}

	return &ClientFactory{
		dataStore:            dataStore,
		signatureService:     signatureService,
		reverseTunnelService: reverseTunnelService,
		instanceID:           instanceID,
		endpointClients:      make(map[string]*KubeClient),
		endpointProxyClients: cache.New(timeout, timeout),
		AddrHTTPS:            addrHTTPS,
	}, nil
}

func (factory *ClientFactory) GetInstanceID() (instanceID string) {
	return factory.instanceID
}

// Remove the cached kube client so a new one can be created
func (factory *ClientFactory) RemoveKubeClient(endpointID portainer.EndpointID) {
factory.mu.Lock()
delete(factory.endpointClients, strconv.Itoa(int(endpointID)))
	factory.mu.Unlock()
}

// GetKubeClient checks if an existing client is already registered for the environment(endpoint) and returns it if one is found.
// If no client is registered, it will create a new client, register it, and return it.
func (factory *ClientFactory) GetKubeClient(endpoint *portainer.Endpoint) (*KubeClient, error) {
	factory.mu.Lock()
	defer factory.mu.Unlock()

	key := strconv.Itoa(int(endpoint.ID))
	client, ok := factory.endpointClients[key]
	if !ok {
		var err error

		client, err = factory.createCachedAdminKubeClient(endpoint)
		if err != nil {
			return nil, err
		}

		factory.endpointClients[key] = client
	}

	return client, nil
}

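// Note: admin clients cached by GetKubeClient live until RemoveKubeClient is
// called for the environment, whereas the token-scoped proxy clients below
// expire with the user session timeout configured in NewClientFactory.
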
// GetProxyKubeClient retrieves a KubeClient from the cache. SetProxyKubeClient
// should have been called first to populate it. It is normally called by the
// kubernetes middleware.
func (factory *ClientFactory) GetProxyKubeClient(endpointID, token string) (*KubeClient, bool) {
	client, ok := factory.endpointProxyClients.Get(endpointID + "." + token)
	if !ok {
		return nil, false
	}

	return client.(*KubeClient), true
}

// SetProxyKubeClient stores a kubeclient in the cache.
func (factory *ClientFactory) SetProxyKubeClient(endpointID, token string, cli *KubeClient) {
	factory.endpointProxyClients.Set(endpointID+"."+token, cli, 0)
}

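// A minimal usage sketch of the proxy-client cache (the handler shape and the
// kubeConfig variable are assumptions for illustration, not the actual
// kubernetes middleware):
//
//	cli, ok := factory.GetProxyKubeClient(endpointID, token)
//	if !ok {
//		var err error
//		cli, err = factory.CreateKubeClientFromKubeConfig(endpointID, kubeConfig)
//		if err != nil {
//			return err
//		}
//		factory.SetProxyKubeClient(endpointID, token, cli)
//	}
//
// Entries are keyed by "<endpointID>.<token>" and use the cache's default
// expiration, i.e. the user session timeout passed to NewClientFactory.
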
// CreateKubeClientFromKubeConfig creates a KubeClient from a clusterID and a
// Kubernetes config.
func (factory *ClientFactory) CreateKubeClientFromKubeConfig(clusterID string, kubeConfig []byte) (*KubeClient, error) {
	config, err := clientcmd.NewClientConfigFromBytes(kubeConfig)
	if err != nil {
		return nil, err
	}

	cliConfig, err := config.ClientConfig()
	if err != nil {
		return nil, err
	}

	cliConfig.QPS = DefaultKubeClientQPS
	cliConfig.Burst = DefaultKubeClientBurst

	cli, err := kubernetes.NewForConfig(cliConfig)
	if err != nil {
		return nil, err
	}

	return &KubeClient{
		cli:        cli,
		instanceID: factory.instanceID,
	}, nil
}

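// For example (illustrative only; the path and cluster name are assumptions):
//
//	data, err := os.ReadFile("/path/to/kubeconfig")
//	if err != nil {
//		return err
//	}
//	cli, err := factory.CreateKubeClientFromKubeConfig("my-cluster", data)
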
func (factory *ClientFactory) createCachedAdminKubeClient(endpoint *portainer.Endpoint) (*KubeClient, error) {
	cli, err := factory.CreateClient(endpoint)
	if err != nil {
		return nil, err
	}

	return &KubeClient{
		cli:        cli,
		instanceID: factory.instanceID,
	}, nil
}

// CreateClient returns a pointer to a new Clientset instance
func (factory *ClientFactory) CreateClient(endpoint *portainer.Endpoint) (*kubernetes.Clientset, error) {
switch endpoint.Type {
case portainer.KubernetesLocalEnvironment:
return buildLocalClient()
case portainer.AgentOnKubernetesEnvironment:
return factory.buildAgentClient(endpoint)
case portainer.EdgeAgentOnKubernetesEnvironment:
return factory.buildEdgeClient(endpoint)
	}

	return nil, errors.New("unsupported environment type")
}

type agentHeaderRoundTripper struct {
signatureHeader string
publicKeyHeader string
	roundTripper http.RoundTripper
}

// RoundTrip is the implementation of the http.RoundTripper interface.
// It decorates the request with specific agent headers
func (rt *agentHeaderRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.Header.Add(portainer.PortainerAgentPublicKeyHeader, rt.publicKeyHeader)
req.Header.Add(portainer.PortainerAgentSignatureHeader, rt.signatureHeader)
	return rt.roundTripper.RoundTrip(req)
}

func (factory *ClientFactory) buildAgentClient(endpoint *portainer.Endpoint) (*kubernetes.Clientset, error) {
endpointURL := fmt.Sprintf("https://%s/kubernetes", endpoint.URL)
	return factory.createRemoteClient(endpointURL)
}

func (factory *ClientFactory) buildEdgeClient(endpoint *portainer.Endpoint) (*kubernetes.Clientset, error) {
	tunnel, err := factory.reverseTunnelService.GetActiveTunnel(endpoint)
	if err != nil {
		return nil, errors.Wrap(err, "failed activating tunnel")
	}

	endpointURL := fmt.Sprintf("http://127.0.0.1:%d/kubernetes", tunnel.Port)

	return factory.createRemoteClient(endpointURL)
}

func (factory *ClientFactory) createRemoteClient(endpointURL string) (*kubernetes.Clientset, error) {
	signature, err := factory.signatureService.CreateSignature(portainer.PortainerAgentSignatureMessage)
	if err != nil {
		return nil, err
	}

	config, err := clientcmd.BuildConfigFromFlags(endpointURL, "")
	if err != nil {
		return nil, err
	}

config.Insecure = true
config.QPS = DefaultKubeClientQPS
config.Burst = DefaultKubeClientBurst
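	// Wrap the transport so that every request sent to the agent carries the
	// Portainer signature and public key headers, which the agent uses to
	// authenticate the caller.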
config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
return &agentHeaderRoundTripper{
signatureHeader: signature,
publicKeyHeader: factory.signatureService.EncodedPublicKey(),
roundTripper: rt,
})
	return kubernetes.NewForConfig(config)
}

func buildLocalClient() (*kubernetes.Clientset, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}

	return kubernetes.NewForConfig(config)
}

func (factory *ClientFactory) MigrateEndpointIngresses(e *portainer.Endpoint) error {
// classes is a list of controllers which have been manually added to the
// cluster setup view. These need to all be allowed globally, but then
// blocked in specific namespaces which they were not previously allowed in.
classes := e.Kubernetes.Configuration.IngressClasses
// We need a kube client to gather namespace level permissions. In pre-2.16
// versions of portainer, the namespace level permissions were stored by
// creating an actual ingress rule in the cluster with a particular
	// annotation indicating that its name (the class name) should be allowed.
	cli, err := factory.GetKubeClient(e)
	if err != nil {
		return err
	}

	detected, err := cli.GetIngressControllers()
	if err != nil {
		return err
	}

// newControllers is a set of all currently detected controllers.
newControllers := make(map[string]struct{})
for _, controller := range detected {
		newControllers[controller.ClassName] = struct{}{}
	}

	namespaces, err := cli.GetNamespaces()
	if err != nil {
		return err
	}

	// allow maps each class name (plus the special "none" entry) to the set
	// of namespaces in which that class was previously allowed.
allow := make(map[string]map[string]struct{})
for _, c := range classes {
		allow[c.Name] = make(map[string]struct{})
	}
allow["none"] = make(map[string]struct{})
for namespace := range namespaces {
// Compare old annotations with currently detected controllers.
		ingresses, err := cli.GetIngresses(namespace)
		if err != nil {
			return fmt.Errorf("failure getting ingresses during migration: %w", err)
		}

for _, ingress := range ingresses {
			oldController, ok := ingress.Annotations["ingress.portainer.io/ingress-type"]
			if !ok {
				// Skip rules without our old annotation.
				continue
			}

			if _, ok := newControllers[oldController]; ok {
				// The rule matches a detected controller; remember that this
				// controller was allowed in the namespace.
				allow[oldController][ingress.Namespace] = struct{}{}
				continue
			}

			// The annotated controller is no longer detected, so record the
			// namespace against the special "none" class.
			allow["none"][ingress.Namespace] = struct{}{}
		}
	}

	// Each configured class remains globally allowed, but is blocked in any
	// namespace in which it was not previously allowed.
	var newClasses []portainer.KubernetesIngressClassConfig
	for _, c := range classes {
		var blocked []string
		for namespace := range namespaces {
			if _, ok := allow[c.Name][namespace]; ok {
				continue
			}
			blocked = append(blocked, namespace)
		}

		newClasses = append(newClasses, portainer.KubernetesIngressClassConfig{
			Name:              c.Name,
			Type:              c.Type,
			GloballyBlocked:   false,
			BlockedNamespaces: blocked,
		})
	}

// Handle "none".
if len(allow["none"]) != 0 {
e.Kubernetes.Configuration.AllowNoneIngressClass = true
		var disallowNone []string
		for namespace := range namespaces {
			if _, ok := allow["none"][namespace]; ok {
				continue
			}
			disallowNone = append(disallowNone, namespace)
		}

		newClasses = append(newClasses, portainer.KubernetesIngressClassConfig{
			Name:              "none",
			Type:              "custom",
			GloballyBlocked:   false,
			BlockedNamespaces: disallowNone,
		})
	}

	e.Kubernetes.Configuration.IngressClasses = newClasses
	e.PostInitMigrations.MigrateIngresses = false

	return factory.dataStore.Endpoint().UpdateEndpoint(e.ID, e)
}