mirror of https://github.com/portainer/portainer
feat(dashboard): dashboard api [EE-7111] (#11844)
parent 61ef133bb8
commit 6e0dd34cc8
@ -0,0 +1,37 @@
package kubernetes

import (
	"net/http"

	httperror "github.com/portainer/portainer/pkg/libhttp/error"
	"github.com/portainer/portainer/pkg/libhttp/response"
)

// @id GetKubernetesDashboard
// @summary Get the dashboard summary data
// @description Get the dashboard summary data: a count of a range of commonly used kubernetes resources
// @description **Access policy**: authenticated
// @tags kubernetes
// @security ApiKeyAuth
// @security jwt
// @accept json
// @produce json
// @param id path int true "Environment (Endpoint) identifier"
// @success 200 {object} kubernetes.K8sDashboard "Success"
// @failure 400 "Invalid request"
// @failure 500 "Server error"
// @router /kubernetes/{id}/dashboard [get]
func (handler *Handler) getKubernetesDashboard(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	cli, httpErr := handler.getProxyKubeClient(r)
	if httpErr != nil {
		return httpErr
	}

	dashboard, err := cli.GetDashboard()
	if err != nil {
		return httperror.InternalServerError("Unable to retrieve dashboard data", err)
	}

	return response.JSON(w, dashboard)
}
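
For illustration only (not part of this diff), a minimal Go sketch of calling the new endpoint. It assumes Portainer's usual /api prefix, a hypothetical host, and an X-API-Key header matching the ApiKeyAuth annotation; the key, host, and environment ID 1 are all placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical Portainer host and environment ID; the API key is a placeholder.
	req, err := http.NewRequest(http.MethodGet, "https://portainer.example.com/api/kubernetes/1/dashboard", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-API-Key", "ptr_placeholder")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response body is a single K8sDashboard object of int64 counts.
	var counts map[string]int64
	if err := json.NewDecoder(resp.Body).Decode(&counts); err != nil {
		panic(err)
	}
	fmt.Println(counts) // namespacesCount, applicationsCount, servicesCount, ...
}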
@ -0,0 +1,13 @@
package kubernetes

type (
	K8sDashboard struct {
		NamespacesCount   int64 `json:"namespacesCount"`
		ApplicationsCount int64 `json:"applicationsCount"`
		ServicesCount     int64 `json:"servicesCount"`
		IngressesCount    int64 `json:"ingressesCount"`
		ConfigMapsCount   int64 `json:"configMapsCount"`
		SecretsCount      int64 `json:"secretsCount"`
		VolumesCount      int64 `json:"volumesCount"`
	}
)
@ -0,0 +1,144 @@
// Package concurrent provides utilities for running multiple functions concurrently in Go.
// For example, many kubernetes calls can take a while to fulfill. Often in Portainer
// we need to get a list of objects from multiple kubernetes REST APIs, and we can call
// these APIs concurrently to speed up the response time.
// This package provides a clean way to do just that.
//
// Example:
// The ConfigMaps and Secrets function converted using concurrent.Run.
/*

// GetConfigMapsAndSecrets gets all the ConfigMaps AND all the Secrets for a
// given namespace in a k8s endpoint. The result is a list of both config maps
// and secrets. The IsSecret boolean property indicates if a given struct is a
// secret or a configmap.
func (kcl *KubeClient) GetConfigMapsAndSecrets(namespace string) ([]models.K8sConfigMapOrSecret, error) {

	// Use closures to capture the current kube client and namespace by declaring wrapper
	// functions that match the interface signature for concurrent.Func.

	listConfigMaps := func(ctx context.Context) (interface{}, error) {
		return kcl.cli.CoreV1().ConfigMaps(namespace).List(ctx, meta.ListOptions{})
	}

	listSecrets := func(ctx context.Context) (interface{}, error) {
		return kcl.cli.CoreV1().Secrets(namespace).List(ctx, meta.ListOptions{})
	}

	// Run the functions concurrently and wait for results. The context can also be used
	// to cancel, e.g. with a deadline timer. A maxConcurrency of 0 runs one worker per task.
	results, err := concurrent.Run(context.TODO(), 0, listConfigMaps, listSecrets)
	if err != nil {
		return nil, err
	}

	var configMapList *core.ConfigMapList
	var secretList *core.SecretList
	for _, r := range results {
		switch v := r.Result.(type) {
		case *core.ConfigMapList:
			configMapList = v
		case *core.SecretList:
			secretList = v
		}
	}

	// TODO: Applications
	var combined []models.K8sConfigMapOrSecret
	for _, m := range configMapList.Items {
		var cm models.K8sConfigMapOrSecret
		cm.UID = string(m.UID)
		cm.Name = m.Name
		cm.Namespace = m.Namespace
		cm.Annotations = m.Annotations
		cm.Data = m.Data
		cm.CreationDate = m.CreationTimestamp.Time.UTC().Format(time.RFC3339)
		combined = append(combined, cm)
	}

	for _, s := range secretList.Items {
		var secret models.K8sConfigMapOrSecret
		secret.UID = string(s.UID)
		secret.Name = s.Name
		secret.Namespace = s.Namespace
		secret.Annotations = s.Annotations
		secret.Data = msbToMss(s.Data)
		secret.CreationDate = s.CreationTimestamp.Time.UTC().Format(time.RFC3339)
		secret.IsSecret = true
		secret.SecretType = string(s.Type)
		combined = append(combined, secret)
	}

	return combined, nil
}

*/

package concurrent

import (
	"context"
	"sync"
)

// Result contains the result and any error returned from running a client task function
type Result struct {
	Result any   // the result of running the task function
	Err    error // any error that occurred while running the task function
}

// Func is a function that returns a result or an error
type Func func(ctx context.Context) (any, error)

// Run runs a list of functions concurrently, with at most maxConcurrency of them
// in flight at once, and returns the results
func Run(ctx context.Context, maxConcurrency int, tasks ...Func) ([]Result, error) {
	var wg sync.WaitGroup
	resultsChan := make(chan Result, len(tasks))
	taskChan := make(chan Func, len(tasks))

	localCtx, cancelCtx := context.WithCancel(ctx)
	defer cancelCtx()

	runTask := func() {
		defer wg.Done()
		for fn := range taskChan {
			result, err := fn(localCtx)
			resultsChan <- Result{Result: result, Err: err}
		}
	}

	// Set maxConcurrency to the number of tasks if zero or negative
	if maxConcurrency <= 0 {
		maxConcurrency = len(tasks)
	}

	// Start worker goroutines
	for i := 0; i < maxConcurrency; i++ {
		wg.Add(1)
		go runTask()
	}

	// Add tasks to the task channel
	for _, fn := range tasks {
		taskChan <- fn
	}

	// Close the task channel to signal workers to stop when all tasks are done
	close(taskChan)

	// Wait for all workers to complete
	wg.Wait()
	close(resultsChan)

	// Collect the results and cancel on error
	results := make([]Result, 0, len(tasks))
	for r := range resultsChan {
		if r.Err != nil {
			cancelCtx()
			return nil, r.Err
		}
		results = append(results, r)
	}

	return results, nil
}
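
As a quick usage sketch (editor illustration, not part of this diff): running three trivial tasks through concurrent.Run. The doubling tasks are hypothetical stand-ins for slow API calls; note that results arrive in completion order, not submission order, so callers that care about order should switch on the result type as the package example above does.

package main

import (
	"context"
	"fmt"

	"github.com/portainer/portainer/api/internal/concurrent"
)

func main() {
	// double builds a concurrent.Func that returns n*2; a real task would also
	// honour ctx cancellation.
	double := func(n int) concurrent.Func {
		return func(ctx context.Context) (any, error) {
			return n * 2, nil
		}
	}

	// maxConcurrency 0 starts one worker per task.
	results, err := concurrent.Run(context.Background(), 0, double(1), double(2), double(3))
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Println(r.Result) // 2, 4, 6 in completion order
	}
}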
@ -0,0 +1,154 @@
package cli

import (
	"context"
	"strings"

	models "github.com/portainer/portainer/api/http/models/kubernetes"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GetApplications gets a list of kubernetes workloads (or applications) by kind.
// If kind is empty, all workload kinds (including naked pods) are returned.
func (kcl *KubeClient) GetApplications(namespace, kind string) ([]models.K8sApplication, error) {
	applicationList := []models.K8sApplication{}
	listOpts := metav1.ListOptions{}

	if kind == "" || strings.EqualFold(kind, "deployment") {
		deployments, err := kcl.cli.AppsV1().Deployments(namespace).List(context.TODO(), listOpts)
		if err != nil {
			return nil, err
		}

		for _, d := range deployments.Items {
			applicationList = append(applicationList, models.K8sApplication{
				UID:       string(d.UID),
				Name:      d.Name,
				Namespace: d.Namespace,
				Kind:      "Deployment",
				Labels:    d.Labels,
			})
		}
	}

	if kind == "" || strings.EqualFold(kind, "statefulset") {
		statefulSets, err := kcl.cli.AppsV1().StatefulSets(namespace).List(context.TODO(), listOpts)
		if err != nil {
			return nil, err
		}

		for _, s := range statefulSets.Items {
			applicationList = append(applicationList, models.K8sApplication{
				UID:       string(s.UID),
				Name:      s.Name,
				Namespace: s.Namespace,
				Kind:      "StatefulSet",
				Labels:    s.Labels,
			})
		}
	}

	if kind == "" || strings.EqualFold(kind, "daemonset") {
		daemonSets, err := kcl.cli.AppsV1().DaemonSets(namespace).List(context.TODO(), listOpts)
		if err != nil {
			return nil, err
		}

		for _, d := range daemonSets.Items {
			applicationList = append(applicationList, models.K8sApplication{
				UID:       string(d.UID),
				Name:      d.Name,
				Namespace: d.Namespace,
				Kind:      "DaemonSet",
				Labels:    d.Labels,
			})
		}
	}

	if kind == "" || strings.EqualFold(kind, "nakedpods") {
		pods, err := kcl.cli.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
		if err != nil {
			return nil, err
		}

		for _, pod := range pods.Items {
			naked := false
			if len(pod.OwnerReferences) == 0 {
				naked = true
			} else {
				managed := false
			loop:
				for _, ownerRef := range pod.OwnerReferences {
					switch ownerRef.Kind {
					case "Deployment", "DaemonSet", "ReplicaSet":
						managed = true
						break loop
					}
				}

				if !managed {
					naked = true
				}
			}

			if naked {
				applicationList = append(applicationList, models.K8sApplication{
					UID:       string(pod.UID),
					Name:      pod.Name,
					Namespace: pod.Namespace,
					Kind:      "Pod",
					Labels:    pod.Labels,
				})
			}
		}
	}

	return applicationList, nil
}

// GetApplication gets a kubernetes workload (application) by kind and name.
// An unrecognised kind returns an empty application.
func (kcl *KubeClient) GetApplication(namespace, kind, name string) (models.K8sApplication, error) {
	opts := metav1.GetOptions{}

	switch strings.ToLower(kind) {
	case "deployment":
		d, err := kcl.cli.AppsV1().Deployments(namespace).Get(context.TODO(), name, opts)
		if err != nil {
			return models.K8sApplication{}, err
		}

		return models.K8sApplication{
			UID:       string(d.UID),
			Name:      d.Name,
			Namespace: d.Namespace,
			Kind:      "Deployment",
			Labels:    d.Labels,
		}, nil

	case "statefulset":
		s, err := kcl.cli.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, opts)
		if err != nil {
			return models.K8sApplication{}, err
		}

		return models.K8sApplication{
			UID:       string(s.UID),
			Name:      s.Name,
			Namespace: s.Namespace,
			Kind:      "StatefulSet",
			Labels:    s.Labels,
		}, nil

	case "daemonset":
		d, err := kcl.cli.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, opts)
		if err != nil {
			return models.K8sApplication{}, err
		}

		return models.K8sApplication{
			UID:       string(d.UID),
			Name:      d.Name,
			Namespace: d.Namespace,
			Kind:      "DaemonSet",
			Labels:    d.Labels,
		}, nil
	}

	return models.K8sApplication{}, nil
}
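
A hedged usage sketch (not part of this diff) showing the call shape from inside the same cli package; printWorkloads is a hypothetical helper and assumes an additional fmt import.

// printWorkloads is a hypothetical helper: an empty kind selects deployments,
// statefulsets, daemonsets, and naked pods together.
func printWorkloads(kcl *KubeClient) error {
	apps, err := kcl.GetApplications("default", "")
	if err != nil {
		return err
	}
	for _, app := range apps {
		fmt.Printf("%s/%s (%s)\n", app.Namespace, app.Name, app.Kind)
	}
	return nil
}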
@ -0,0 +1,258 @@
package cli

import (
	"context"

	models "github.com/portainer/portainer/api/http/models/kubernetes"
	"github.com/portainer/portainer/api/internal/concurrent"

	"k8s.io/apimachinery/pkg/api/errors"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (kcl *KubeClient) GetDashboard() (models.K8sDashboard, error) {
	dashboardData := models.K8sDashboard{}

	// Get a list of all the namespaces first
	namespaces, err := kcl.cli.CoreV1().Namespaces().List(context.TODO(), v1.ListOptions{})
	if err != nil {
		return dashboardData, err
	}

	getNamespaceCounts := func(namespace string) concurrent.Func {
		return func(ctx context.Context) (any, error) {
			data := models.K8sDashboard{}

			// apps (deployments, statefulsets, daemonsets)
			applicationCount, err := getApplicationsCount(ctx, kcl, namespace)
			if err != nil {
				// Skip namespaces we're not allowed to access, but don't return an
				// error so that we can still count the other namespaces. Returning
				// an error here would stop concurrent.Run.
				if errors.IsForbidden(err) {
					return nil, nil
				}
				return nil, err
			}
			data.ApplicationsCount = applicationCount

			// services
			serviceCount, err := getServicesCount(ctx, kcl, namespace)
			if err != nil {
				return nil, err
			}
			data.ServicesCount = serviceCount

			// ingresses
			ingressesCount, err := getIngressesCount(ctx, kcl, namespace)
			if err != nil {
				return nil, err
			}
			data.IngressesCount = ingressesCount

			// configmaps
			configMapCount, err := getConfigMapsCount(ctx, kcl, namespace)
			if err != nil {
				return nil, err
			}
			data.ConfigMapsCount = configMapCount

			// secrets
			secretsCount, err := getSecretsCount(ctx, kcl, namespace)
			if err != nil {
				return nil, err
			}
			data.SecretsCount = secretsCount

			// volumes
			volumesCount, err := getVolumesCount(ctx, kcl, namespace)
			if err != nil {
				return nil, err
			}
			data.VolumesCount = volumesCount

			// count this namespace for the user
			data.NamespacesCount = 1

			return data, nil
		}
	}

	dashboardTasks := make([]concurrent.Func, 0)
	for _, ns := range namespaces.Items {
		dashboardTasks = append(dashboardTasks, getNamespaceCounts(ns.Name))
	}

	// Fetch all the data for each namespace concurrently
	results, err := concurrent.Run(context.TODO(), maxConcurrency, dashboardTasks...)
	if err != nil {
		return dashboardData, err
	}

	// Sum up the results
	for i := range results {
		data, _ := results[i].Result.(models.K8sDashboard)
		dashboardData.NamespacesCount += data.NamespacesCount
		dashboardData.ApplicationsCount += data.ApplicationsCount
		dashboardData.ServicesCount += data.ServicesCount
		dashboardData.IngressesCount += data.IngressesCount
		dashboardData.ConfigMapsCount += data.ConfigMapsCount
		dashboardData.SecretsCount += data.SecretsCount
		dashboardData.VolumesCount += data.VolumesCount
	}

	return dashboardData, nil
}

// Get the count of applications (deployments, statefulsets and daemonsets), plus naked pods
func getApplicationsCount(ctx context.Context, kcl *KubeClient, namespace string) (int64, error) {
	options := v1.ListOptions{Limit: 1}
	count := int64(0)

	// deployments
	deployments, err := kcl.cli.AppsV1().Deployments(namespace).List(ctx, options)
	if err != nil {
		return 0, err
	}

	if len(deployments.Items) > 0 {
		count = 1 // first deployment
		remainingItemsCount := deployments.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining deployments if any
		}
	}

	// statefulsets
	statefulSets, err := kcl.cli.AppsV1().StatefulSets(namespace).List(ctx, options)
	if err != nil {
		return 0, err
	}

	if len(statefulSets.Items) > 0 {
		count += 1 // + first statefulset
		remainingItemsCount := statefulSets.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining statefulsets if any
		}
	}

	// daemonsets
	daemonSets, err := kcl.cli.AppsV1().DaemonSets(namespace).List(ctx, options)
	if err != nil {
		return 0, err
	}

	if len(daemonSets.Items) > 0 {
		count += 1 // + first daemonset
		remainingItemsCount := daemonSets.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining daemonsets if any
		}
	}

	// + naked pods
	nakedPods, err := kcl.GetApplications(namespace, "nakedpods")
	if err != nil {
		return 0, err
	}

	return count + int64(len(nakedPods)), nil
}

// Get the total count of services for the given namespace
func getServicesCount(ctx context.Context, kcl *KubeClient, namespace string) (int64, error) {
	services, err := kcl.cli.CoreV1().Services(namespace).List(ctx, v1.ListOptions{Limit: 1})
	if err != nil {
		return 0, err
	}

	count := int64(0)
	if len(services.Items) > 0 {
		count = 1 // first service
		remainingItemsCount := services.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining services if any
		}
	}

	return count, nil
}

// Get the total count of ingresses for the given namespace
func getIngressesCount(ctx context.Context, kcl *KubeClient, namespace string) (int64, error) {
	ingresses, err := kcl.cli.NetworkingV1().Ingresses(namespace).List(ctx, v1.ListOptions{Limit: 1})
	if err != nil {
		return 0, err
	}

	count := int64(0)
	if len(ingresses.Items) > 0 {
		count = 1 // first ingress
		remainingItemsCount := ingresses.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining ingresses if any
		}
	}

	return count, nil
}

// Get the total count of configmaps for the given namespace
func getConfigMapsCount(ctx context.Context, kcl *KubeClient, namespace string) (int64, error) {
	configMaps, err := kcl.cli.CoreV1().ConfigMaps(namespace).List(ctx, v1.ListOptions{Limit: 1})
	if err != nil {
		return 0, err
	}

	count := int64(0)
	if len(configMaps.Items) > 0 {
		count = 1 // first configmap
		remainingItemsCount := configMaps.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining configmaps if any
		}
	}

	return count, nil
}

// Get the total count of secrets for the given namespace
func getSecretsCount(ctx context.Context, kcl *KubeClient, namespace string) (int64, error) {
	secrets, err := kcl.cli.CoreV1().Secrets(namespace).List(ctx, v1.ListOptions{Limit: 1})
	if err != nil {
		return 0, err
	}

	count := int64(0)
	if len(secrets.Items) > 0 {
		count = 1 // first secret
		remainingItemsCount := secrets.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining secrets if any
		}
	}

	return count, nil
}

// Get the total count of volumes (persistent volume claims) for the given namespace
func getVolumesCount(ctx context.Context, kcl *KubeClient, namespace string) (int64, error) {
	volumes, err := kcl.cli.CoreV1().PersistentVolumeClaims(namespace).List(ctx, v1.ListOptions{Limit: 1})
	if err != nil {
		return 0, err
	}

	count := int64(0)
	if len(volumes.Items) > 0 {
		count = 1 // first volume
		remainingItemsCount := volumes.GetRemainingItemCount()
		if remainingItemsCount != nil {
			count += *remainingItemsCount // add the remaining volumes if any
		}
	}

	return count, nil
}
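
Each count helper above repeats the same trick: list with Limit: 1 and add the server-reported remainingItemCount rather than fetching every object. Purely as an illustration (not part of this diff), the shared arithmetic could be factored into a hypothetical helper:

// itemCount is a hypothetical refactoring sketch, not part of this change.
// It derives a total from a Limit:1 list response: the single returned item
// plus whatever the API server reports as remaining.
func itemCount(returnedItems int, remaining *int64) int64 {
	if returnedItems == 0 {
		return 0
	}
	count := int64(1)
	if remaining != nil {
		count += *remaining
	}
	return count
}

// e.g. count := itemCount(len(services.Items), services.GetRemainingItemCount())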
@ -0,0 +1,43 @@
import { useQuery } from 'react-query';

import { withError } from '@/react-tools/react-query';
import axios, { parseAxiosError } from '@/portainer/services/axios';
import { EnvironmentId } from '@/react/portainer/environments/types';

import { K8sDashboard } from '../types';

const queryKeys = {
  list: (environmentId: EnvironmentId) =>
    ['environments', environmentId, 'dashboard'] as const,
};

export function useGetDashboardQuery(
  environmentId: EnvironmentId,
  options?: { autoRefreshRate?: number }
) {
  return useQuery(
    queryKeys.list(environmentId),
    async () => getDashboard(environmentId),
    {
      ...withError('Unable to get dashboard stats'),
      refetchInterval() {
        return options?.autoRefreshRate ?? false;
      },
    }
  );
}

async function getDashboard(environmentId: EnvironmentId) {
  try {
    const { data: dashboard } = await axios.get<K8sDashboard>(
      `kubernetes/${environmentId}/dashboard`
    );

    return dashboard;
  } catch (e) {
    throw parseAxiosError(
      e,
      'Unable to get dashboard stats. Some counts may be inaccurate.'
    );
  }
}
@ -0,0 +1,9 @@
export type K8sDashboard = {
  namespacesCount: number;
  applicationsCount: number;
  servicesCount: number;
  ingressesCount: number;
  configMapsCount: number;
  secretsCount: number;
  volumesCount: number;
};