mirror of https://github.com/portainer/portainer
fix(token-cache-manager): refactor to avoid data races EE-4438 (#8094)
parent dd01165224
commit c28be7aced
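The hunks below swap the cmap.ConcurrentMap-backed caches (and the separate CreateTokenCache/GetOrCreateTokenCache pair) for plain maps guarded by a sync.Mutex, so the check-then-create step runs atomically. A minimal sketch of that pattern, using simplified illustrative names (Manager, cache, GetOrCreate) rather than the actual Portainer types:

package main

import (
	"fmt"
	"sync"
)

// cache stands in for the per-endpoint token cache in the diff below.
type cache struct {
	mu     sync.Mutex
	tokens map[int]string
}

// Manager guards a plain map with a mutex; the whole check-then-create
// sequence runs under the lock, so two goroutines asking for the same
// endpoint always end up sharing one *cache instead of racing to
// create and register two different ones.
type Manager struct {
	mu     sync.Mutex
	caches map[int]*cache
}

func NewManager() *Manager {
	return &Manager{caches: make(map[int]*cache)}
}

func (m *Manager) GetOrCreate(endpointID int) *cache {
	m.mu.Lock()
	defer m.mu.Unlock()

	if c, ok := m.caches[endpointID]; ok {
		return c
	}

	c := &cache{tokens: make(map[int]string)}
	m.caches[endpointID] = c

	return c
}

func main() {
	m := NewManager()

	var wg sync.WaitGroup
	var c1, c2 *cache

	wg.Add(2)
	go func() { defer wg.Done(); c1 = m.GetOrCreate(1) }()
	go func() { defer wg.Done(); c2 = m.GetOrCreate(1) }()
	wg.Wait()

	fmt.Println(c1 == c2) // prints true: both goroutines share the same cache
}

With the previous unsynchronized check-then-create, both goroutines could miss the lookup, each create its own cache, and one registration would silently overwrite the other.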
@@ -49,7 +49,7 @@ func (deployer *KubernetesDeployer) getToken(userID portainer.UserID, endpoint *
 		return "", err
 	}
 
-	tokenCache := deployer.kubernetesTokenCacheManager.GetOrCreateTokenCache(int(endpoint.ID))
+	tokenCache := deployer.kubernetesTokenCacheManager.GetOrCreateTokenCache(endpoint.ID)
 
 	tokenManager, err := kubernetes.NewTokenManager(kubeCLI, deployer.dataStore, tokenCache, setLocalAdminToken)
 	if err != nil {
@@ -23,7 +23,7 @@ func (handler *Handler) logout(w http.ResponseWriter, r *http.Request) *httperro
 		return httperror.InternalServerError("Unable to retrieve user details from authentication token", err)
 	}
 
-	handler.KubernetesTokenCacheManager.RemoveUserFromCache(int(tokenData.ID))
+	handler.KubernetesTokenCacheManager.RemoveUserFromCache(tokenData.ID)
 
 	return response.Empty(w)
 }
@@ -170,7 +170,7 @@ func (handler *Handler) getToken(request *http.Request, endpoint *portainer.Endp
 		return "", false, err
 	}
 
-	tokenCache := handler.kubernetesTokenCacheManager.GetOrCreateTokenCache(int(endpoint.ID))
+	tokenCache := handler.kubernetesTokenCacheManager.GetOrCreateTokenCache(endpoint.ID)
 
 	tokenManager, err := kubernetes.NewTokenManager(kubecli, handler.DataStore, tokenCache, setLocalAdminToken)
 	if err != nil {
@@ -33,7 +33,7 @@ func (factory *ProxyFactory) newKubernetesLocalProxy(endpoint *portainer.Endpoin
 		return nil, err
 	}
 
-	tokenCache := factory.kubernetesTokenCacheManager.CreateTokenCache(int(endpoint.ID))
+	tokenCache := factory.kubernetesTokenCacheManager.GetOrCreateTokenCache(endpoint.ID)
 	tokenManager, err := kubernetes.NewTokenManager(kubecli, factory.dataStore, tokenCache, true)
 	if err != nil {
 		return nil, err

@@ -64,7 +64,7 @@ func (factory *ProxyFactory) newKubernetesEdgeHTTPProxy(endpoint *portainer.Endp
 		return nil, err
 	}
 
-	tokenCache := factory.kubernetesTokenCacheManager.CreateTokenCache(int(endpoint.ID))
+	tokenCache := factory.kubernetesTokenCacheManager.GetOrCreateTokenCache(endpoint.ID)
 	tokenManager, err := kubernetes.NewTokenManager(kubecli, factory.dataStore, tokenCache, false)
 	if err != nil {
 		return nil, err

@@ -96,7 +96,7 @@ func (factory *ProxyFactory) newKubernetesAgentHTTPSProxy(endpoint *portainer.En
 		return nil, err
 	}
 
-	tokenCache := factory.kubernetesTokenCacheManager.CreateTokenCache(int(endpoint.ID))
+	tokenCache := factory.kubernetesTokenCacheManager.GetOrCreateTokenCache(endpoint.ID)
 	tokenManager, err := kubernetes.NewTokenManager(kubecli, factory.dataStore, tokenCache, false)
 	if err != nil {
 		return nil, err
@@ -43,18 +43,15 @@ func (manager *tokenManager) GetAdminServiceAccountToken() string {
 	return manager.adminToken
 }
 
+// GetUserServiceAccountToken setup a user's service account if it does not exist, then retrieve its token
 func (manager *tokenManager) GetUserServiceAccountToken(userID int, endpointID portainer.EndpointID) (string, error) {
-	manager.tokenCache.mutex.Lock()
-	defer manager.tokenCache.mutex.Unlock()
-
-	token, ok := manager.tokenCache.getToken(userID)
-	if !ok {
+	tokenFunc := func() (string, error) {
 		memberships, err := manager.dataStore.TeamMembership().TeamMembershipsByUserID(portainer.UserID(userID))
 		if err != nil {
 			return "", err
 		}
 
-		teamIds := make([]int, 0)
+		teamIds := make([]int, 0, len(memberships))
 		for _, membership := range memberships {
 			teamIds = append(teamIds, int(membership.TeamID))
 		}

@@ -70,14 +67,8 @@ func (manager *tokenManager) GetUserServiceAccountToken(userID int, endpointID p
 			return "", err
 		}
 
-		serviceAccountToken, err := manager.kubecli.GetServiceAccountBearerToken(userID)
-		if err != nil {
-			return "", err
-		}
-
-		manager.tokenCache.addToken(userID, serviceAccountToken)
-		token = serviceAccountToken
+		return manager.kubecli.GetServiceAccountBearerToken(userID)
 	}
 
-	return token, nil
+	return manager.tokenCache.getOrAddToken(portainer.UserID(userID), tokenFunc)
 }
@@ -1,84 +1,78 @@
 package kubernetes
 
 import (
-	"strconv"
 	"sync"
 
-	cmap "github.com/orcaman/concurrent-map"
+	portainer "github.com/portainer/portainer/api"
 )
 
-type (
-	// TokenCacheManager represents a service used to manage multiple tokenCache objects.
-	TokenCacheManager struct {
-		tokenCaches cmap.ConcurrentMap
-	}
+// TokenCacheManager represents a service used to manage multiple tokenCache objects.
+type TokenCacheManager struct {
+	tokenCaches map[portainer.EndpointID]*tokenCache
+	mu          sync.Mutex
+}
 
-	tokenCache struct {
-		userTokenCache cmap.ConcurrentMap
-		mutex          sync.Mutex
-	}
-)
+type tokenCache struct {
+	userTokenCache map[portainer.UserID]string
+	mu             sync.Mutex
+}
 
 // NewTokenCacheManager returns a pointer to a new instance of TokenCacheManager
 func NewTokenCacheManager() *TokenCacheManager {
 	return &TokenCacheManager{
-		tokenCaches: cmap.New(),
+		tokenCaches: make(map[portainer.EndpointID]*tokenCache),
 	}
 }
 
-// CreateTokenCache will create a new tokenCache object, associate it to the manager map of caches
-// and return a pointer to that tokenCache instance.
-func (manager *TokenCacheManager) CreateTokenCache(endpointID int) *tokenCache {
-	tokenCache := newTokenCache()
-
-	key := strconv.Itoa(endpointID)
-	manager.tokenCaches.Set(key, tokenCache)
-
-	return tokenCache
-}
-
 // GetOrCreateTokenCache will get the tokenCache from the manager map of caches if it exists,
 // otherwise it will create a new tokenCache object, associate it to the manager map of caches
 // and return a pointer to that tokenCache instance.
-func (manager *TokenCacheManager) GetOrCreateTokenCache(endpointID int) *tokenCache {
-	key := strconv.Itoa(endpointID)
-	if epCache, ok := manager.tokenCaches.Get(key); ok {
-		return epCache.(*tokenCache)
+func (manager *TokenCacheManager) GetOrCreateTokenCache(endpointID portainer.EndpointID) *tokenCache {
+	manager.mu.Lock()
+	defer manager.mu.Unlock()
+
+	if tc, ok := manager.tokenCaches[endpointID]; ok {
+		return tc
 	}
 
-	return manager.CreateTokenCache(endpointID)
+	tc := &tokenCache{
+		userTokenCache: make(map[portainer.UserID]string),
+	}
+
+	manager.tokenCaches[endpointID] = tc
+
+	return tc
 }
 
 // RemoveUserFromCache will ensure that the specific userID is removed from all registered caches.
-func (manager *TokenCacheManager) RemoveUserFromCache(userID int) {
-	for cache := range manager.tokenCaches.IterBuffered() {
-		cache.Val.(*tokenCache).removeToken(userID)
+func (manager *TokenCacheManager) RemoveUserFromCache(userID portainer.UserID) {
+	manager.mu.Lock()
+	for _, tc := range manager.tokenCaches {
+		tc.removeToken(userID)
 	}
+	manager.mu.Unlock()
 }
 
-func newTokenCache() *tokenCache {
-	return &tokenCache{
-		userTokenCache: cmap.New(),
-		mutex:          sync.Mutex{},
-	}
-}
-
-func (cache *tokenCache) getToken(userID int) (string, bool) {
-	key := strconv.Itoa(userID)
-	token, ok := cache.userTokenCache.Get(key)
-	if ok {
-		return token.(string), true
+func (cache *tokenCache) getOrAddToken(userID portainer.UserID, tokenGetFunc func() (string, error)) (string, error) {
+	cache.mu.Lock()
+	defer cache.mu.Unlock()
+
+	if tok, ok := cache.userTokenCache[userID]; ok {
+		return tok, nil
 	}
 
-	return "", false
+	tok, err := tokenGetFunc()
+	if err != nil {
+		return "", err
+	}
+
+	cache.userTokenCache[userID] = tok
+
+	return tok, nil
 }
 
-func (cache *tokenCache) addToken(userID int, token string) {
-	key := strconv.Itoa(userID)
-	cache.userTokenCache.Set(key, token)
-}
-
-func (cache *tokenCache) removeToken(userID int) {
-	key := strconv.Itoa(userID)
-	cache.userTokenCache.Remove(key)
+func (cache *tokenCache) removeToken(userID portainer.UserID) {
+	cache.mu.Lock()
+	delete(cache.userTokenCache, userID)
+	cache.mu.Unlock()
 }
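A design note on the rewritten cache above: getOrAddToken takes the token fetch as a func() (string, error) and invokes it while holding the per-cache mutex, so the service-account setup that GetUserServiceAccountToken now wraps in tokenFunc runs at most once per user; the trade-off is that concurrent callers hitting the same endpoint cache are serialized until the first fetch completes and its result is cached.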
@@ -0,0 +1,102 @@
+package kubernetes
+
+import (
+	"errors"
+	"testing"
+
+	portainer "github.com/portainer/portainer/api"
+)
+
+func noTokFunc() (string, error) {
+	return "", errors.New("no token found")
+}
+
+func stringTok(tok string) func() (string, error) {
+	return func() (string, error) {
+		return tok, nil
+	}
+}
+
+func failFunc(t *testing.T) func() (string, error) {
+	return func() (string, error) {
+		t.FailNow()
+		return noTokFunc()
+	}
+}
+
+func TestTokenCacheDataRace(t *testing.T) {
+	ch := make(chan struct{})
+
+	for i := 0; i < 1000; i++ {
+		var tokenCache1, tokenCache2 *tokenCache
+
+		mgr := NewTokenCacheManager()
+
+		go func() {
+			tokenCache1 = mgr.GetOrCreateTokenCache(1)
+			ch <- struct{}{}
+		}()
+
+		go func() {
+			tokenCache2 = mgr.GetOrCreateTokenCache(1)
+			ch <- struct{}{}
+		}()
+
+		<-ch
+		<-ch
+
+		if tokenCache1 != tokenCache2 {
+			t.FailNow()
+		}
+	}
+}
+
+func TestTokenCache(t *testing.T) {
+	mgr := NewTokenCacheManager()
+	tc1 := mgr.GetOrCreateTokenCache(1)
+	tc2 := mgr.GetOrCreateTokenCache(2)
+	tc3 := mgr.GetOrCreateTokenCache(3)
+
+	uid := portainer.UserID(2)
+	tokenString1 := "token-string-1"
+	tokenString2 := "token-string-2"
+
+	tok, err := tc1.getOrAddToken(uid, stringTok(tokenString1))
+	if err != nil || tok != tokenString1 {
+		t.FailNow()
+	}
+
+	tok, err = tc1.getOrAddToken(uid, failFunc(t))
+	if err != nil || tok != tokenString1 {
+		t.FailNow()
+	}
+
+	tok, err = tc2.getOrAddToken(uid, stringTok(tokenString2))
+	if err != nil || tok != tokenString2 {
+		t.FailNow()
+	}
+
+	_, err = tc3.getOrAddToken(uid, noTokFunc)
+	if err == nil {
+		t.FailNow()
+	}
+
+	// Remove one user from all the caches
+
+	mgr.RemoveUserFromCache(uid)
+
+	_, err = tc1.getOrAddToken(uid, noTokFunc)
+	if err == nil {
+		t.FailNow()
+	}
+
+	_, err = tc2.getOrAddToken(uid, noTokFunc)
+	if err == nil {
+		t.FailNow()
+	}
+
+	_, err = tc3.getOrAddToken(uid, noTokFunc)
+	if err == nil {
+		t.FailNow()
+	}
+}
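The new TestTokenCacheDataRace above repeatedly races two goroutines through GetOrCreateTokenCache for the same endpoint and asserts both receive the same cache pointer; it is intended to be run under the Go race detector (go test -race), which would also flag any unsynchronized access to the underlying maps.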