package deploy

import (
	"bufio"
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	errors2 "github.com/pkg/errors"
	"github.com/rancher/k3s/pkg/agent/util"
	apisv1 "github.com/rancher/k3s/pkg/apis/k3s.cattle.io/v1"
	controllersv1 "github.com/rancher/k3s/pkg/generated/controllers/k3s.cattle.io/v1"
	"github.com/rancher/wrangler/pkg/apply"
	"github.com/rancher/wrangler/pkg/merr"
	"github.com/rancher/wrangler/pkg/objectset"
	"github.com/rancher/wrangler/pkg/schemes"
	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	yamlDecoder "k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/kubernetes"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

const (
	ControllerName = "deploy"
	startKey       = "_start_"
)

// WatchFiles sets up an OnChange callback to start a periodic goroutine to watch files for changes once the controller has started up.
func WatchFiles(ctx context.Context, client kubernetes.Interface, apply apply.Apply, addons controllersv1.AddonController, disables map[string]bool, bases ...string) error {
	w := &watcher{
		apply:      apply,
		addonCache: addons.Cache(),
		addons:     addons,
		bases:      bases,
		disables:   disables,
		modTime:    map[string]time.Time{},
	}

	addons.Enqueue(metav1.NamespaceNone, startKey)
	addons.OnChange(ctx, "addon-start", func(key string, _ *apisv1.Addon) (*apisv1.Addon, error) {
		if key == startKey {
			go w.start(ctx, client)
		}
		return nil, nil
	})

	return nil
}
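
// Hypothetical usage sketch (not part of the original source): a caller that already holds a
// Kubernetes client, a wrangler apply.Apply, an AddonController, a disables map, and a manifest
// directory could start the watcher roughly as follows. The identifiers k8sClient, applier,
// addonController, disables, and manifestsDir are assumptions, not names from this codebase.
//
//	if err := deploy.WatchFiles(ctx, k8sClient, applier, addonController, disables, manifestsDir); err != nil {
//		return err
//	}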

type watcher struct {
	apply      apply.Apply
	addonCache controllersv1.AddonCache
	addons     controllersv1.AddonClient
	bases      []string
	disables   map[string]bool
	modTime    map[string]time.Time
	recorder   record.EventRecorder
}

// start calls listFiles at regular intervals to trigger application of manifests that have changed on disk.
func (w *watcher) start(ctx context.Context, client kubernetes.Interface) {
	nodeName := os.Getenv("NODE_NAME")
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(logrus.Infof)
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events(metav1.NamespaceSystem)})
	w.recorder = broadcaster.NewRecorder(schemes.All, corev1.EventSource{Component: ControllerName, Host: nodeName})

	force := true
	for {
		if err := w.listFiles(force); err == nil {
			force = false
		} else {
			logrus.Errorf("Failed to process config: %v", err)
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(15 * time.Second):
		}
	}
}

// listFiles calls listFilesIn on a list of paths.
func (w *watcher) listFiles(force bool) error {
	var errs []error
	for _, base := range w.bases {
		if err := w.listFilesIn(base, force); err != nil {
			errs = append(errs, err)
		}
	}
	return merr.NewErrors(errs...)
}

// listFilesIn recursively processes all files within a path, and checks them against the disable and skip lists.
// Files found that are not on either list are loaded as Addons and applied to the cluster.
func (w *watcher) listFilesIn(base string, force bool) error {
	files := map[string]os.FileInfo{}
	if err := filepath.Walk(base, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		files[path] = info
		return nil
	}); err != nil {
		return err
	}

	// Make a map of .skip files - these are used later to indicate that a given file should be ignored
	// For example, 'addon.yaml.skip' will cause 'addon.yaml' to be ignored completely - unless it is also
	// disabled, since disable processing happens first.
	skips := map[string]bool{}
	keys := make([]string, len(files))
	keyIndex := 0
	for path, file := range files {
		if strings.HasSuffix(file.Name(), ".skip") {
			skips[strings.TrimSuffix(file.Name(), ".skip")] = true
		}
		keys[keyIndex] = path
		keyIndex++
	}
	sort.Strings(keys)

	var errs []error
	for _, path := range keys {
		// Disabled files are not just skipped, but actively deleted from the filesystem
		if shouldDisableFile(base, path, w.disables) {
			if err := w.delete(path); err != nil {
				errs = append(errs, errors2.Wrapf(err, "failed to delete %s", path))
			}
			continue
		}
		// Skipped files are just ignored
		if shouldSkipFile(files[path].Name(), skips) {
			continue
		}
		modTime := files[path].ModTime()
		if !force && modTime.Equal(w.modTime[path]) {
			continue
		}
		if err := w.deploy(path, !force); err != nil {
			errs = append(errs, errors2.Wrapf(err, "failed to process %s", path))
		} else {
			w.modTime[path] = modTime
		}
	}

	return merr.NewErrors(errs...)
}

// deploy loads yaml from a manifest on disk, creates an Addon resource to track its application,
// and then applies all resources contained within to the cluster.
func (w *watcher) deploy(path string, compareChecksum bool) error {
	name := basename(path)
	addon, err := w.getOrCreateAddon(name)
	if err != nil {
		return err
	}

	addon.Spec.Source = path
	addon.Status.GVKs = nil

	// Create the new Addon now so that we can use it to report Events when parsing/applying the manifest;
	// Events need the UID and ObjectRevision set to function properly.
	if addon.UID == "" {
		newAddon, err := w.addons.Create(&addon)
		if err != nil {
			return err
		}
		addon = *newAddon
	}

	content, err := ioutil.ReadFile(path)
	if err != nil {
		w.recorder.Eventf(&addon, corev1.EventTypeWarning, "ReadManifestFailed", "Read manifest at %q failed: %v", path, err)
		return err
	}

	checksum := checksum(content)
	if compareChecksum && checksum == addon.Spec.Checksum {
		logrus.Debugf("Skipping existing deployment of %s, check=%v, checksum %s=%s", path, compareChecksum, checksum, addon.Spec.Checksum)
		return nil
	}

	// Attempt to parse the YAML/JSON into objects. Failure at this point would be due to bad file content -
	// not YAML/JSON, YAML/JSON that can't be converted to Kubernetes objects, etc.
	objectSet, err := objectSet(content)
	if err != nil {
		w.recorder.Eventf(&addon, corev1.EventTypeWarning, "ParseManifestFailed", "Parse manifest at %q failed: %v", path, err)
		return err
	}

	// Attempt to apply the changes. Failure at this point would be due to more complicated issues - invalid
	// changes to existing objects, changes rejected by validating webhooks, etc.
	w.recorder.Eventf(&addon, corev1.EventTypeNormal, "ApplyingManifest", "Applying manifest at %q", path)
	if err := w.apply.WithOwner(&addon).Apply(objectSet); err != nil {
		w.recorder.Eventf(&addon, corev1.EventTypeWarning, "ApplyManifestFailed", "Applying manifest at %q failed: %v", path, err)
		return err
	}

	// Emit the event and update the Addon checksum only if the apply was successful.
	w.recorder.Eventf(&addon, corev1.EventTypeNormal, "AppliedManifest", "Applied manifest at %q", path)
	addon.Spec.Checksum = checksum
	_, err = w.addons.Update(&addon)
	return err
}
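
// For illustration (hypothetical path, not from the source): deploying a manifest at
// /var/lib/rancher/k3s/server/manifests/foo.yaml produces an Addon named "foo" in the kube-system
// namespace, with Spec.Source set to that path and, once the apply succeeds, Spec.Checksum set to
// the hex-encoded SHA256 of the file contents.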

// delete completely removes both a manifest, and any resources that it did or would have created.
// The manifest is parsed, and any resources it specified are deleted. Finally, the file itself is removed from disk.
func (w *watcher) delete(path string) error {
	name := basename(path)
	addon, err := w.getOrCreateAddon(name)
	if err != nil {
		return err
	}

	content, err := ioutil.ReadFile(path)
	if err != nil {
		w.recorder.Eventf(&addon, corev1.EventTypeWarning, "ReadManifestFailed", "Read manifest at %q failed: %v", path, err)
		return err
	}

	objectSet, err := objectSet(content)
	if err != nil {
		w.recorder.Eventf(&addon, corev1.EventTypeWarning, "ParseManifestFailed", "Parse manifest at %q failed: %v", path, err)
		return err
	}
	var gvk []schema.GroupVersionKind
	for k := range objectSet.ObjectsByGVK() {
		gvk = append(gvk, k)
	}

	// ensure that the addon is completely removed before deleting the objectSet,
	// so return when err == nil, otherwise pods may get stuck terminating
	w.recorder.Eventf(&addon, corev1.EventTypeNormal, "DeletingManifest", "Deleting manifest at %q", path)
	if err := w.addons.Delete(addon.Namespace, addon.Name, &metav1.DeleteOptions{}); err == nil || !errors.IsNotFound(err) {
		return err
	}

	// apply an empty set with owner & gvk data to delete
	if err := w.apply.WithOwner(&addon).WithGVK(gvk...).Apply(nil); err != nil {
		return err
	}

	return os.Remove(path)
}

// getOrCreateAddon attempts to get an Addon by name from the addon namespace, and creates a new one
// if it cannot be found.
func (w *watcher) getOrCreateAddon(name string) (apisv1.Addon, error) {
	addon, err := w.addonCache.Get(metav1.NamespaceSystem, name)
	if errors.IsNotFound(err) {
		addon = apisv1.NewAddon(metav1.NamespaceSystem, name, apisv1.Addon{})
	} else if err != nil {
		return apisv1.Addon{}, err
	}
	return *addon, nil
}

// objectSet returns a new ObjectSet containing all resources from a given yaml chunk
func objectSet(content []byte) (*objectset.ObjectSet, error) {
	objs, err := yamlToObjects(bytes.NewBuffer(content))
	if err != nil {
		return nil, err
	}

	os := objectset.NewObjectSet()
	os.Add(objs...)
	return os, nil
}
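
// For example (illustrative content, not from the source): a manifest containing two documents
// separated by "---", such as a ConfigMap followed by a Deployment, yields an ObjectSet holding
// both objects, which the wrangler apply.Apply above can then reconcile as a single owned set.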

// basename returns a file's basename by returning everything before the first period
func basename(path string) string {
	name := filepath.Base(path)
	return strings.SplitN(name, ".", 2)[0]
}
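
// For example, basename("/var/lib/rancher/k3s/server/manifests/coredns.yaml") and
// basename("/var/lib/rancher/k3s/server/manifests/coredns.yaml.skip") both return "coredns"
// (the directory shown is illustrative; any path works the same way).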

// checksum returns the hex-encoded SHA256 sum of a byte slice
func checksum(bytes []byte) string {
	d := sha256.Sum256(bytes)
	return hex.EncodeToString(d[:])
}
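
// For example, checksum([]byte("hello")) returns
// "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824".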

// isEmptyYaml returns true if a chunk of YAML contains nothing but whitespace, comments, or document separators
func isEmptyYaml(yaml []byte) bool {
	isEmpty := true
	lines := bytes.Split(yaml, []byte("\n"))
	for _, l := range lines {
		s := bytes.TrimSpace(l)
		if string(s) != "---" && !bytes.HasPrefix(s, []byte("#")) && string(s) != "" {
			isEmpty = false
		}
	}
	return isEmpty
}
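
// For example, isEmptyYaml([]byte("---\n# just a comment\n")) returns true, while
// isEmptyYaml([]byte("kind: ConfigMap\n")) returns false.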

// yamlToObjects returns an object slice yielded from documents in a chunk of YAML
func yamlToObjects(in io.Reader) ([]runtime.Object, error) {
	var result []runtime.Object
	reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096))
	for {
		raw, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		if !isEmptyYaml(raw) {
			obj, err := toObjects(raw)
			if err != nil {
				return nil, err
			}

			result = append(result, obj...)
		}
	}

	return result, nil
}

// toObjects returns one or more objects from a single YAML document
func toObjects(bytes []byte) ([]runtime.Object, error) {
	bytes, err := yamlDecoder.ToJSON(bytes)
	if err != nil {
		return nil, err
	}

	obj, _, err := unstructured.UnstructuredJSONScheme.Decode(bytes, nil, nil)
	if err != nil {
		return nil, err
	}

	if l, ok := obj.(*unstructured.UnstructuredList); ok {
		var result []runtime.Object
		for _, obj := range l.Items {
			copy := obj
			result = append(result, &copy)
		}
		return result, nil
	}

	return []runtime.Object{obj}, nil
}

// shouldSkipFile returns true if a file should be skipped. It skips anything from the provided skip map,
// anything that is a dotfile, and anything that does not have a json/yaml/yml extension.
func shouldSkipFile(fileName string, skips map[string]bool) bool {
	switch {
	case strings.HasPrefix(fileName, "."):
		return true
	case skips[fileName]:
		return true
	case util.HasSuffixI(fileName, ".yaml", ".yml", ".json"):
		return false
	default:
		return true
	}
}
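
// For example, with skips = map[string]bool{"addon.yaml": true}: shouldSkipFile(".hidden.yaml", skips),
// shouldSkipFile("addon.yaml", skips), and shouldSkipFile("README.md", skips) all return true, while
// shouldSkipFile("other.yaml", skips) returns false.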

// shouldDisableFile returns true if a file should be disabled, by checking its path against a disables map.
// Subdirectory names are checked for all files; basenames are checked only for json/yaml/yml files.
func shouldDisableFile(base, fileName string, disables map[string]bool) bool {
	// Check to see if the file is in a subdirectory that is in the disables map.
	// If a file is nested several levels deep, checks 'parent1', 'parent1/parent2', 'parent1/parent2/parent3', etc.
	relFile := strings.TrimPrefix(fileName, base)
	namePath := strings.Split(relFile, string(os.PathSeparator))
	for i := 1; i < len(namePath); i++ {
		subPath := filepath.Join(namePath[0:i]...)
		if disables[subPath] {
			return true
		}
	}
	if !util.HasSuffixI(fileName, ".yaml", ".yml", ".json") {
		return false
	}
	// Check the basename against the disables map
	baseFile := filepath.Base(fileName)
	suffix := filepath.Ext(baseFile)
	baseName := strings.TrimSuffix(baseFile, suffix)
	if disables[baseName] {
		return true
	}
	return false
}
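
// For example (illustrative paths): with disables = map[string]bool{"traefik": true} and
// base = "/var/lib/rancher/k3s/server/manifests", both "<base>/traefik.yaml" and any file under
// "<base>/traefik/" are reported as disabled, while "<base>/coredns.yaml" is not.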