mirror of https://github.com/portainer/portainer
chore(loops): remove loop var copy EE-7342 (#12023)
parent 6e7a42727a
commit 7fd1a644a6
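Background for the change: Go 1.22 made `for` loop variables per-iteration instead of per-loop, so the manual `x := x` copies that this commit deletes are no longer needed when a closure or pointer outlives the iteration. A minimal sketch of the old hazard, not taken from this repository:

// Minimal sketch (not Portainer code): before Go 1.22 the loop variable was
// shared by every iteration, so goroutines started in the loop could all see
// the final value unless the body made a copy ("v := v"). Since Go 1.22 each
// iteration declares a fresh v and the copy is redundant.
package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup

    for _, v := range []string{"a", "b", "c"} {
        // v := v // required on Go <= 1.21, redundant (and now linted) on Go >= 1.22
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println(v) // prints a, b, c in some order under Go 1.22 semantics
        }()
    }

    wg.Wait()
}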
@@ -51,5 +51,5 @@ jobs:
       - name: GolangCI-Lint
         uses: golangci/golangci-lint-action@v3
         with:
-          version: v1.55.2
+          version: v1.59.1
           args: --timeout=10m -c .golangci.yaml
@@ -9,7 +9,7 @@ linters:
     - gosimple
     - govet
     - errorlint
-    - exportloopref
+    - copyloopvar

 linters-settings:
   depguard:
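The linter swap follows from the same semantics change: exportloopref warned when a pointer to the shared loop variable escaped the iteration, which can no longer go wrong on Go 1.22, while copyloopvar flags the now-redundant manual copies. The golangci-lint bump to v1.59.1 is presumably what makes copyloopvar available. A small illustrative snippet, not Portainer code, of what each linter targets:

// Illustrative only.
package main

import "fmt"

func main() {
    nums := []int{1, 2, 3}

    ptrs := make([]*int, 0, len(nums))
    for _, n := range nums {
        n := n                  // copyloopvar reports this copy as redundant on Go >= 1.22
        ptrs = append(ptrs, &n) // without the copy, exportloopref would flag this &n on Go <= 1.21
    }

    for _, p := range ptrs {
        fmt.Println(*p) // 1 2 3
    }
}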
@@ -87,10 +87,7 @@ func Test_NeedsEncryptionMigration(t *testing.T) {
     }

     for _, tc := range cases {
-        tc := tc
-
         t.Run(tc.name, func(t *testing.T) {
-
             connection := DbConnection{Path: dir}

             if tc.dbname == "both" {
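The same reasoning applies to table-driven tests: the `tc := tc` line existed so that subtests, especially parallel ones, would not all capture the last test case. With per-iteration variables it can simply be dropped. A hedged sketch of the pattern, independent of the Portainer test above:

// Hypothetical table-driven test without the tc := tc copy (Go >= 1.22).
package example

import "testing"

func TestDouble(t *testing.T) {
    cases := []struct {
        name string
        in   int
        want int
    }{
        {name: "zero", in: 0, want: 0},
        {name: "two", in: 2, want: 4},
    }

    for _, tc := range cases {
        // tc := tc // only needed on Go <= 1.21, e.g. because of t.Parallel() below
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()
            if got := tc.in * 2; got != tc.want {
                t.Errorf("double(%d) = %d, want %d", tc.in, got, tc.want)
            }
        })
    }
}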
@@ -5,14 +5,15 @@ import (
     "strings"
     "time"

-    portainer "github.com/portainer/portainer/api"
-    dockerclient "github.com/portainer/portainer/api/docker/client"
-    "github.com/portainer/portainer/api/docker/consts"

     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/container"
+    _container "github.com/docker/docker/api/types/container"
     "github.com/docker/docker/api/types/volume"
     "github.com/docker/docker/client"
+    portainer "github.com/portainer/portainer/api"
+    dockerclient "github.com/portainer/portainer/api/docker/client"
+    "github.com/portainer/portainer/api/docker/consts"
     "github.com/rs/zerolog/log"
 )

@@ -40,8 +41,7 @@ func (snapshotter *Snapshotter) CreateSnapshot(endpoint *portainer.Endpoint) (*p
 }

 func snapshot(cli *client.Client, endpoint *portainer.Endpoint) (*portainer.DockerSnapshot, error) {
-    _, err := cli.Ping(context.Background())
-    if err != nil {
+    if _, err := cli.Ping(context.Background()); err != nil {
         return nil, err
     }

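Most of the remaining snapshot.go changes fold the two-line `err = f(); if err != nil` pattern into the single-statement form `if err := f(); err != nil`, which scopes each err to the check that consumes it. A small standalone sketch of the idiom, with illustrative names only:

// Illustrative sketch of the if-with-initializer error check.
package main

import (
    "errors"
    "fmt"
)

func ping() error { return nil }
func load() error { return errors.New("not found") }

func main() {
    // err exists only inside each if statement, so a later check can never
    // accidentally test a stale value left over from an earlier call.
    if err := ping(); err != nil {
        fmt.Println("ping failed:", err)
        return
    }

    if err := load(); err != nil {
        fmt.Println("load failed:", err) // prints: load failed: not found
    }
}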
@@ -49,49 +49,42 @@ func snapshot(cli *client.Client, endpoint *portainer.Endpoint) (*portainer.Dock
         StackCount: 0,
     }

-    err = snapshotInfo(snapshot, cli)
-    if err != nil {
+    if err := snapshotInfo(snapshot, cli); err != nil {
         log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot engine information")
     }

     if snapshot.Swarm {
-        err = snapshotSwarmServices(snapshot, cli)
-        if err != nil {
+        if err := snapshotSwarmServices(snapshot, cli); err != nil {
             log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot Swarm services")
         }

-        err = snapshotNodes(snapshot, cli)
-        if err != nil {
+        if err := snapshotNodes(snapshot, cli); err != nil {
            log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot Swarm nodes")
         }
     }

-    err = snapshotContainers(snapshot, cli)
-    if err != nil {
+    if err := snapshotContainers(snapshot, cli); err != nil {
         log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot containers")
     }

-    err = snapshotImages(snapshot, cli)
-    if err != nil {
+    if err := snapshotImages(snapshot, cli); err != nil {
         log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot images")
     }

-    err = snapshotVolumes(snapshot, cli)
-    if err != nil {
+    if err := snapshotVolumes(snapshot, cli); err != nil {
         log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot volumes")
     }

-    err = snapshotNetworks(snapshot, cli)
-    if err != nil {
+    if err := snapshotNetworks(snapshot, cli); err != nil {
         log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot networks")
     }

-    err = snapshotVersion(snapshot, cli)
-    if err != nil {
+    if err := snapshotVersion(snapshot, cli); err != nil {
         log.Warn().Str("environment", endpoint.Name).Err(err).Msg("unable to snapshot engine version")
     }

     snapshot.Time = time.Now().Unix()

     return snapshot, nil
 }

@@ -106,6 +99,7 @@ func snapshotInfo(snapshot *portainer.DockerSnapshot, cli *client.Client) error
     snapshot.TotalCPU = info.NCPU
     snapshot.TotalMemory = info.MemTotal
     snapshot.SnapshotRaw.Info = info
+
     return nil
 }

@@ -114,15 +108,19 @@ func snapshotNodes(snapshot *portainer.DockerSnapshot, cli *client.Client) error
     if err != nil {
         return err
     }
+
     var nanoCpus int64
     var totalMem int64
+
     for _, node := range nodes {
         nanoCpus += node.Description.Resources.NanoCPUs
         totalMem += node.Description.Resources.MemoryBytes
     }
+
     snapshot.TotalCPU = int(nanoCpus / 1e9)
     snapshot.TotalMemory = totalMem
     snapshot.NodeCount = len(nodes)
+
     return nil
 }

@@ -144,6 +142,7 @@ func snapshotSwarmServices(snapshot *portainer.DockerSnapshot, cli *client.Clien

     snapshot.ServiceCount = len(services)
     snapshot.StackCount += len(stacks)
+
     return nil
 }

@@ -156,9 +155,10 @@ func snapshotContainers(snapshot *portainer.DockerSnapshot, cli *client.Client)
     stacks := make(map[string]struct{})
     gpuUseSet := make(map[string]struct{})
     gpuUseAll := false
+
     for _, container := range containers {
         if container.State == "running" {
-            // snapshot GPUs
+            // Snapshot GPUs
             response, err := cli.ContainerInspect(context.Background(), container.ID)
             if err != nil {
                 // Inspect a container will fail when the container runs on a different
@@ -177,7 +177,6 @@ func snapshotContainers(snapshot *portainer.DockerSnapshot, cli *client.Client)
             } else {
                 var gpuOptions *_container.DeviceRequest = nil
                 for _, deviceRequest := range response.HostConfig.Resources.DeviceRequests {
-                    deviceRequest := deviceRequest
                     if deviceRequest.Driver == "nvidia" || deviceRequest.Capabilities[0][0] == "gpu" {
                         gpuOptions = &deviceRequest
                     }
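The deleted `deviceRequest := deviceRequest` copy guarded the `gpuOptions = &deviceRequest` assignment: on Go <= 1.21 that pointer would have aliased a single variable rewritten on every iteration, while on Go 1.22 each iteration's deviceRequest is distinct, so keeping a pointer to the matching element is safe. A reduced sketch of the "remember a pointer to the match" pattern, with made-up types and values:

// Reduced sketch, not Portainer code: keeping a pointer to the matching
// element of a slice, safe without a manual copy on Go >= 1.22.
package main

import "fmt"

type deviceRequest struct {
    Driver string
    Count  int
}

func main() {
    requests := []deviceRequest{
        {Driver: "nvidia", Count: -1},
        {Driver: "cpu", Count: 1},
    }

    var match *deviceRequest
    for _, req := range requests {
        if req.Driver == "nvidia" {
            match = &req // on Go <= 1.21 this would end up pointing at the last element unless req was copied first
        }
    }

    if match != nil {
        fmt.Println(match.Driver, match.Count) // nvidia -1
    }
}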
@@ -187,6 +186,7 @@ func snapshotContainers(snapshot *portainer.DockerSnapshot, cli *client.Client)
                 if gpuOptions.Count == -1 {
                     gpuUseAll = true
                 }
+
                 for _, id := range gpuOptions.DeviceIDs {
                     gpuUseSet[id] = struct{}{}
                 }
@@ -217,9 +217,11 @@ func snapshotContainers(snapshot *portainer.DockerSnapshot, cli *client.Client)
     snapshot.HealthyContainerCount = stats.Healthy
     snapshot.UnhealthyContainerCount = stats.Unhealthy
     snapshot.StackCount += len(stacks)
+
     for _, container := range containers {
         snapshot.SnapshotRaw.Containers = append(snapshot.SnapshotRaw.Containers, portainer.DockerContainerSnapshot{Container: container})
     }
+
     return nil
 }

@@ -231,6 +233,7 @@ func snapshotImages(snapshot *portainer.DockerSnapshot, cli *client.Client) erro

     snapshot.ImageCount = len(images)
     snapshot.SnapshotRaw.Images = images
+
     return nil
 }

@@ -242,6 +245,7 @@ func snapshotVolumes(snapshot *portainer.DockerSnapshot, cli *client.Client) err

     snapshot.VolumeCount = len(volumes.Volumes)
     snapshot.SnapshotRaw.Volumes = volumes
+
     return nil
 }

@@ -250,7 +254,9 @@ func snapshotNetworks(snapshot *portainer.DockerSnapshot, cli *client.Client) er
     if err != nil {
         return err
     }
+
     snapshot.SnapshotRaw.Networks = networks
+
     return nil
 }

@@ -259,6 +265,8 @@ func snapshotVersion(snapshot *portainer.DockerSnapshot, cli *client.Client) err
     if err != nil {
         return err
     }
+
     snapshot.SnapshotRaw.Version = version
+
     return nil
 }
@@ -51,8 +51,7 @@ var errChartNameInvalid = errors.New("invalid chart name. " +
 // @router /endpoints/{id}/kubernetes/helm [post]
 func (handler *Handler) helmInstall(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
     var payload installChartPayload
-    err := request.DecodeAndValidateJSONPayload(r, &payload)
-    if err != nil {
+    if err := request.DecodeAndValidateJSONPayload(r, &payload); err != nil {
         return httperror.BadRequest("Invalid Helm install payload", err)
     }

@@ -69,15 +68,19 @@ func (p *installChartPayload) Validate(_ *http.Request) error {
     if p.Repo == "" {
         required = append(required, "repo")
     }
+
     if p.Name == "" {
         required = append(required, "name")
     }
+
     if p.Namespace == "" {
         required = append(required, "namespace")
     }
+
     if p.Chart == "" {
         required = append(required, "chart")
     }
+
     if len(required) > 0 {
         return fmt.Errorf("required field(s) missing: %s", strings.Join(required, ", "))
     }
@@ -94,6 +97,7 @@ func (handler *Handler) installChart(r *http.Request, p installChartPayload) (*r
     if httperr != nil {
         return nil, httperr.Err
     }
+
     installOpts := options.InstallOptions{
         Name:  p.Name,
         Chart: p.Chart,
@@ -112,15 +116,16 @@ func (handler *Handler) installChart(r *http.Request, p installChartPayload) (*r
             return nil, err
         }
         defer os.Remove(file.Name())
-        _, err = file.WriteString(p.Values)
-        if err != nil {
+
+        if _, err := file.WriteString(p.Values); err != nil {
             file.Close()
             return nil, err
         }
-        err = file.Close()
-        if err != nil {
+
+        if err := file.Close(); err != nil {
             return nil, err
         }
+
         installOpts.ValuesFile = file.Name()
     }

@@ -134,8 +139,7 @@ func (handler *Handler) installChart(r *http.Request, p installChartPayload) (*r
         return nil, err
     }

-    err = handler.updateHelmAppManifest(r, manifest, installOpts.Namespace)
-    if err != nil {
+    if err := handler.updateHelmAppManifest(r, manifest, installOpts.Namespace); err != nil {
         return nil, err
     }

@@ -151,12 +155,14 @@ func (handler *Handler) applyPortainerLabelsToHelmAppManifest(r *http.Request, i
     if err != nil {
         return nil, errors.Wrap(err, "unable to retrieve user details from authentication token")
     }
+
     user, err := handler.dataStore.User().Read(tokenData.ID)
     if err != nil {
         return nil, errors.Wrap(err, "unable to load user information from the database")
     }
+
     appLabels := kubernetes.GetHelmAppLabels(installOpts.Name, user.Username)

     labeledManifest, err := kubernetes.AddAppLabels([]byte(manifest), appLabels)
     if err != nil {
         return nil, errors.Wrap(err, "failed to label helm release manifest")
@@ -180,16 +186,15 @@ func (handler *Handler) updateHelmAppManifest(r *http.Request, manifest []byte,
         return errors.Wrap(err, "unable to retrieve user details from authentication token")
     }

-    // extract list of yaml resources from helm manifest
+    // Extract list of YAML resources from Helm manifest
     yamlResources, err := kubernetes.ExtractDocuments(manifest, nil)
     if err != nil {
         return errors.Wrap(err, "unable to extract documents from helm release manifest")
     }

-    // deploy individual resources in parallel
+    // Deploy individual resources in parallel
     g := new(errgroup.Group)
     for _, resource := range yamlResources {
-        resource := resource // https://golang.org/doc/faq#closures_and_goroutines
         g.Go(func() error {
             tmpfile, err := os.CreateTemp("", "helm-manifest-*")
             if err != nil {
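The dropped `resource := resource` line, with its link to the closures-and-goroutines FAQ entry, covered the closure passed to errgroup's g.Go; under Go 1.22 each goroutine already captures its own resource. A self-contained sketch of the same shape, with made-up work standing in for the real manifest deployment:

// Sketch of fanning work out via errgroup without a loop-variable copy.
// Illustrative only; the real handler writes manifests to temp files and
// calls the Kubernetes deployer instead of printing.
package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    resources := []string{"deployment.yaml", "service.yaml", "ingress.yaml"}

    g := new(errgroup.Group)
    for _, resource := range resources {
        // resource := resource // pre-Go 1.22 copy, now redundant
        g.Go(func() error {
            fmt.Println("deploying", resource) // each goroutine sees its own resource
            return nil
        })
    }

    if err := g.Wait(); err != nil {
        fmt.Println("deploy failed:", err)
    }
}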
@@ -214,9 +219,11 @@ func (handler *Handler) updateHelmAppManifest(r *http.Request, manifest []byte,
             }

             _, err = handler.kubernetesDeployer.Deploy(tokenData.ID, endpoint, []string{tmpfile.Name()}, resourceNamespace)
+
             return err
         })
     }
+
     if err := g.Wait(); err != nil {
         return errors.Wrap(err, "unable to patch helm release using kubectl")
     }
@@ -42,7 +42,6 @@ func createRegistryAuthenticationHeader(
     var matchingRegistry *portainer.Registry

     for _, registry := range accessContext.registries {
-        registry := registry
         if registry.ID == registryID &&
             (accessContext.isAdmin ||
                 security.AuthorizedRegistryAccess(&registry, accessContext.user, accessContext.teamMemberships, accessContext.endpointID)) {
@@ -52,14 +51,16 @@ func createRegistryAuthenticationHeader(
         }
     }

-    if matchingRegistry != nil {
-        if err = registryutils.EnsureRegTokenValid(dataStore, matchingRegistry); err != nil {
-            return
-        }
-
-        authenticationHeader.Serveraddress = matchingRegistry.URL
-        authenticationHeader.Username, authenticationHeader.Password, err = registryutils.GetRegEffectiveCredential(matchingRegistry)
-    }
+    if matchingRegistry == nil {
+        return
+    }
+
+    if err = registryutils.EnsureRegTokenValid(dataStore, matchingRegistry); err != nil {
+        return
+    }
+
+    authenticationHeader.Serveraddress = matchingRegistry.URL
+    authenticationHeader.Username, authenticationHeader.Password, err = registryutils.GetRegEffectiveCredential(matchingRegistry)

     return
 }
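Beyond dropping the `registry := registry` copy, this handler is rewritten from a nested `if matchingRegistry != nil { ... }` block into guard clauses: bail out early when there is no match, then handle the token and credentials at the top level. A small sketch of the same refactor on invented types and helpers:

// Guard-clause sketch (illustrative types and helpers, not Portainer's).
package main

import (
    "errors"
    "fmt"
)

type registry struct{ URL string }

func ensureTokenValid(r *registry) error {
    if r.URL == "" {
        return errors.New("registry has no URL")
    }
    return nil
}

func credentialsFor(r *registry) (string, error) {
    // Guard clauses keep the happy path at the lowest indentation level,
    // instead of wrapping everything in "if r != nil { ... }".
    if r == nil {
        return "", nil
    }

    if err := ensureTokenValid(r); err != nil {
        return "", err
    }

    return "user@" + r.URL, nil
}

func main() {
    creds, err := credentialsFor(&registry{URL: "registry.example.com"})
    fmt.Println(creds, err) // user@registry.example.com <nil>
}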
@@ -18,9 +18,9 @@ func EdgeStackRelatedEndpoints(edgeGroupIDs []portainer.EdgeGroupID, endpoints [
         var edgeGroup *portainer.EdgeGroup

         for _, group := range edgeGroups {
-            group := group
             if group.ID == edgeGroupID {
                 edgeGroup = &group
+
                 break
             }
         }