Add auto import images for containerd image store

* Add auto import images

Signed-off-by: Vitor Savian <vitor.savian@suse.com>

* Fix EOF error log when importing tarball files

Signed-off-by: Vitor Savian <vitor.savian@suse.com>

* Delaying queue

Signed-off-by: Vitor Savian <vitor.savian@suse.com>

* Add parse for images

Signed-off-by: Vitor Savian <vitor.savian@suse.com>
pull/11537/head
Vitor Savian 2025-01-09 13:15:27 -03:00 committed by GitHub
parent f345697c0a
commit 7e18c69254
7 changed files with 728 additions and 23 deletions


@@ -40,7 +40,7 @@ jobs:
strategy:
fail-fast: false
matrix:
etest: [startup, s3, btrfs, externalip, privateregistry, embeddedmirror, wasm]
etest: [autoimport, startup, s3, btrfs, externalip, privateregistry, embeddedmirror, wasm]
max-parallel: 3
steps:
- name: "Checkout"


@@ -0,0 +1,54 @@
# Easy way for auto-adding images to k3s
Date: 2024-10-02
## Status
Proposed
## Context
Since the embedded registry feature was introduced, users have been asking about having to manually import images, especially in edge environments.
As a result, there is a need for a folder that can handle this: every image placed there will be watched by a controller for changes or new files, and those new or changed images will be added to the containerd image store.
The controller will watch the agent/images folder, which is the default folder for images. In this first iteration the controller only watches the default image folder, but in the future it could be extended to watch more folders.
The main idea is for the controller to maintain a map of file infos that tracks the state of each file, so it can tell whether a file was modified and whether its size changed.
### Map to handle the state from the files
This map uses the full filepath of the file as the key, so the entry can be looked up directly with `event.Name`.
```go
map[string]fs.FileInfo
```
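For illustration, a minimal sketch of how this state map might be populated; the `watcher` package name, the `record` helper, and the path are only for the example:
```go
package watcher

import (
	"io/fs"
	"os"
)

// filesCache mirrors the proposed state map: the full file path is the key,
// so a later fsnotify event can be matched directly with filesCache[event.Name].
var filesCache = map[string]fs.FileInfo{}

// record stats a file and stores its info under its full path.
func record(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	filesCache[path] = info
	return nil
}
```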
### Why use fsnotify
This library works on any Linux distribution without porting to a specific distro, and it also runs on Windows.
The watch is mainly concerned with the last time an image file was modified.
fsnotify provides a good toolset for handling file changes: the code receives events such as CREATE, RENAME, REMOVE, and WRITE on a channel.
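A minimal sketch of the fsnotify usage described above, assuming the default image directory; the logging is only illustrative:
```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the default image directory; more folders could be added later.
	if err := watcher.Add("/var/lib/rancher/k3s/agent/images"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			// CREATE, WRITE, RENAME, and REMOVE all arrive on this channel.
			log.Printf("event: %s on %s", event.Op, event.Name)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		}
	}
}
```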
### How the controller will work with the events
When the controller receives an event saying a file was created, it adds the file to the map and, if the event is not for a directory, imports the image.
When the controller receives an event saying a file was written, it checks whether the file's size and modification time have changed compared with the size and time recorded in the state.
When the controller receives an event saying a file was renamed or removed, it deletes the file from the state. When a file is renamed, a new file with the same contents but the new name is created, so the watcher will also send the controller an event saying a file was created. A rough sketch of this reconciliation follows below.
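Roughly, the reconciliation against the cached state could look like this sketch; `handleEvent` and `importFile` are hypothetical stand-ins for the actual controller logic:
```go
package watcher

import (
	"io/fs"
	"os"

	"github.com/fsnotify/fsnotify"
)

// handleEvent sketches how a single event is reconciled against the cached state.
func handleEvent(event fsnotify.Event, filesCache map[string]fs.FileInfo, importFile func(string) error) error {
	info, err := os.Stat(event.Name)
	if os.IsNotExist(err) {
		// RENAME or REMOVE: the old path is gone, so drop it from the state.
		delete(filesCache, event.Name)
		return nil
	} else if err != nil {
		return err
	}

	last := filesCache[event.Name]
	filesCache[event.Name] = info

	// CREATE (no previous state) or a WRITE that changed size and modification time.
	if last == nil || (info.Size() != last.Size() && info.ModTime().After(last.ModTime())) {
		return importFile(event.Name)
	}
	return nil
}
```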
## Decision
- Decided
## Consequences
Good:
- Better use of the embedded containerd image store.
- fsnotify is already an indirect dependency used upstream.
Bad:
- The need for another direct dependency.

go.mod

@@ -93,6 +93,7 @@ require (
github.com/docker/docker v27.1.1+incompatible
github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83
github.com/flannel-io/flannel v0.25.7
github.com/fsnotify/fsnotify v1.7.0
github.com/go-bindata/go-bindata v3.1.2+incompatible
github.com/go-logr/logr v1.4.2
github.com/go-logr/stdr v1.2.3-0.20220714215716-96bad1d688c5
@@ -169,6 +170,7 @@ require (
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/cri-tools v0.0.0-00010101000000-000000000000
sigs.k8s.io/yaml v1.4.0
github.com/google/go-containerregistry v0.20.2
)
require (
@@ -257,7 +259,6 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
@@ -277,7 +278,6 @@ require (
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-containerregistry v0.20.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect


@@ -18,6 +18,7 @@ import (
"github.com/containerd/containerd/pkg/cri/constants"
"github.com/containerd/containerd/pkg/cri/labels"
"github.com/containerd/containerd/reference/docker"
reference "github.com/google/go-containerregistry/pkg/name"
"github.com/k3s-io/k3s/pkg/agent/cri"
util2 "github.com/k3s-io/k3s/pkg/agent/util"
"github.com/k3s-io/k3s/pkg/daemons/config"
@@ -115,24 +116,6 @@ func Run(ctx context.Context, cfg *config.Node) error {
// any .txt files are processed as a list of images that should be pre-pulled from remote registries.
// If configured, imported images are retagged as being pulled from additional registries.
func PreloadImages(ctx context.Context, cfg *config.Node) error {
fileInfo, err := os.Stat(cfg.Images)
if os.IsNotExist(err) {
return nil
} else if err != nil {
logrus.Errorf("Unable to find images in %s: %v", cfg.Images, err)
return nil
}
if !fileInfo.IsDir() {
return nil
}
fileInfos, err := os.ReadDir(cfg.Images)
if err != nil {
logrus.Errorf("Unable to read images in %s: %v", cfg.Images, err)
return nil
}
client, err := Client(cfg.Containerd.Address)
if err != nil {
return err
@@ -162,6 +145,28 @@ func PreloadImages(ctx context.Context, cfg *config.Node) error {
return errors.Wrap(err, "failed to clear pinned labels")
}
go watchImages(ctx, cfg)
// After the watcher and client connections are set up, k3s checks whether the images folder already exists;
// if the folder already exists, the images in it are loaded
fileInfo, err := os.Stat(cfg.Images)
if os.IsNotExist(err) {
return nil
} else if err != nil {
logrus.Errorf("Unable to find images in %s: %v", cfg.Images, err)
return nil
}
if !fileInfo.IsDir() {
return nil
}
fileInfos, err := os.ReadDir(cfg.Images)
if err != nil {
logrus.Errorf("Unable to read images in %s: %v", cfg.Images, err)
return nil
}
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
@@ -176,6 +181,7 @@ func PreloadImages(ctx context.Context, cfg *config.Node) error {
}
logrus.Infof("Imported images from %s in %s", filePath, time.Since(start))
}
return nil
}
@@ -214,7 +220,7 @@ func preloadFile(ctx context.Context, cfg *config.Node, client *containerd.Clien
}
}
if err := labelImages(ctx, client, images); err != nil {
if err := labelImages(ctx, client, images, filepath.Base(filePath)); err != nil {
return errors.Wrap(err, "failed to add pinned label to images")
}
if err := retagImages(ctx, client, images, cfg.AgentConfig.AirgapExtraRegistry); err != nil {
@@ -265,7 +271,7 @@ func clearLabels(ctx context.Context, client *containerd.Client) error {
// labelImages adds labels to the listed images, indicating that they
// are pinned by k3s and should not be pruned.
func labelImages(ctx context.Context, client *containerd.Client, images []images.Image) error {
func labelImages(ctx context.Context, client *containerd.Client, images []images.Image, fileName string) error {
var errs []error
imageService := client.ImageService()
for i, image := range images {
@@ -277,6 +283,7 @@ func labelImages(ctx context.Context, client *containerd.Client, images []images
if image.Labels == nil {
image.Labels = map[string]string{}
}
image.Labels[k3sPinnedImageLabelKey] = k3sPinnedImageLabelValue
image.Labels[labels.PinnedImageLabelKey] = labels.PinnedImageLabelValue
updatedImage, err := imageService.Update(ctx, image, "labels")
@@ -354,6 +361,16 @@ func prePullImages(ctx context.Context, client *containerd.Client, imageClient r
for scanner.Scan() {
name := strings.TrimSpace(scanner.Text())
if name == "" {
continue
}
// the options passed to reference.ParseReference filter out strings that cannot be valid image references
if _, err := reference.ParseReference(name, reference.WeakValidation, reference.Insecure); err != nil {
logrus.Errorf("Failed to parse image reference %q: %v", name, err)
continue
}
if status, err := imageClient.ImageStatus(ctx, &runtimeapi.ImageStatusRequest{
Image: &runtimeapi.ImageSpec{
Image: name,


@@ -0,0 +1,290 @@
package containerd
import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"time"
"github.com/containerd/containerd"
"github.com/fsnotify/fsnotify"
"github.com/k3s-io/k3s/pkg/agent/cri"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/pkg/errors"
"github.com/rancher/wharfie/pkg/tarfile"
"github.com/rancher/wrangler/v3/pkg/merr"
"github.com/sirupsen/logrus"
"k8s.io/client-go/util/workqueue"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)
type Watcher struct {
watcher *fsnotify.Watcher
filesCache map[string]fs.FileInfo
workqueue workqueue.TypedDelayingInterface[string]
}
func CreateWatcher() (*Watcher, error) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
return nil, err
}
return &Watcher{
watcher: watcher,
filesCache: make(map[string]fs.FileInfo),
workqueue: workqueue.TypedNewDelayingQueue[string](),
}, nil
}
func isFileSupported(path string) bool {
for _, ext := range append(tarfile.SupportedExtensions, ".txt") {
if strings.HasSuffix(path, ext) {
return true
}
}
return false
}
func (w *Watcher) HandleWatch(path string) error {
if err := w.watcher.Add(path); err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to watch from %s directory: %v", path, err))
}
return nil
}
// Populate records the initial state of the files in the directory
// so the watcher has information to compare against when files change.
// Errors for individual files are collected instead of aborting.
func (w *Watcher) Populate(path string) error {
var errs []error
fileInfos, err := os.ReadDir(path)
if err != nil {
logrus.Errorf("Unable to read files in %s: %v", path, err)
return err
}
for _, dirEntry := range fileInfos {
if dirEntry.IsDir() {
continue
}
// get the file info to add to the state map
fileInfo, err := dirEntry.Info()
if err != nil {
logrus.Errorf("Failed while getting the info from file: %v", err)
errs = append(errs, err)
continue
}
if isFileSupported(dirEntry.Name()) {
// insert the file info into the state map
w.filesCache[filepath.Join(path, dirEntry.Name())] = fileInfo
}
}
return merr.NewErrors(errs...)
}
func (w *Watcher) ClearMap() {
w.filesCache = make(map[string]fs.FileInfo)
}
func (w *Watcher) runWorkerForImages(ctx context.Context, cfg *config.Node) {
// create the client connections once, rather than recreating them for every event
client, err := Client(cfg.Containerd.Address)
if err != nil {
logrus.Errorf("Failed to create containerd client: %v", err)
w.watcher.Close()
return
}
defer client.Close()
criConn, err := cri.Connection(ctx, cfg.Containerd.Address)
if err != nil {
logrus.Errorf("Failed to create CRI connection: %v", err)
w.watcher.Close()
return
}
defer criConn.Close()
imageClient := runtimeapi.NewImageServiceClient(criConn)
for w.processNextEventForImages(ctx, cfg, client, imageClient) {
}
}
func (w *Watcher) processNextEventForImages(ctx context.Context, cfg *config.Node, client *containerd.Client, imageClient runtimeapi.ImageServiceClient) bool {
key, shutdown := w.workqueue.Get()
if shutdown {
return false
}
if err := w.processImageEvent(ctx, key, cfg, client, imageClient); err != nil {
logrus.Errorf("Failed to process image event: %v", err)
}
return true
}
func (w *Watcher) processImageEvent(ctx context.Context, key string, cfg *config.Node, client *containerd.Client, imageClient runtimeapi.ImageServiceClient) error {
defer w.workqueue.Done(key)
file, err := os.Stat(key)
// if the file does not exist, we assume the event was a RENAME or REMOVE
if os.IsNotExist(err) {
if key == cfg.Images {
w.ClearMap()
return nil
}
if !isFileSupported(key) {
return nil
}
delete(w.filesCache, key)
logrus.Debugf("File removed from the image watcher controller: %s", key)
return nil
} else if err != nil {
logrus.Errorf("Failed to get file %s info for image event: %v", key, err)
return err
}
if file.IsDir() {
// only add the image watcher, populate, and search for images when the path is the images folder
if key == cfg.Images {
if err := w.HandleWatch(cfg.Images); err != nil {
logrus.Errorf("Failed to watch %s: %v", cfg.Images, err)
return err
}
if err := w.Populate(cfg.Images); err != nil {
logrus.Errorf("Failed to populate %s files: %v", cfg.Images, err)
return err
}
// Read the directory to see if the created folder has files inside
fileInfos, err := os.ReadDir(cfg.Images)
if err != nil {
logrus.Errorf("Unable to read images in %s: %v", cfg.Images, err)
return err
}
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
}
start := time.Now()
filePath := filepath.Join(cfg.Images, fileInfo.Name())
if err := preloadFile(ctx, cfg, client, imageClient, filePath); err != nil {
logrus.Errorf("Error encountered while importing %s: %v", filePath, err)
continue
}
logrus.Infof("Imported images from %s in %s", filePath, time.Since(start))
}
}
return nil
}
if !isFileSupported(key) {
return nil
}
lastStateFile := w.filesCache[key]
w.filesCache[key] = file
if lastStateFile == nil || (file.Size() != lastStateFile.Size()) && file.ModTime().After(lastStateFile.ModTime()) {
logrus.Debugf("File met the requirements for import to containerd image store: %s", key)
start := time.Now()
if err := preloadFile(ctx, cfg, client, imageClient, key); err != nil {
logrus.Errorf("Failed to import %s: %v", key, err)
return err
}
logrus.Infof("Imported images from %s in %s", key, time.Since(start))
}
return nil
}
func watchImages(ctx context.Context, cfg *config.Node) {
w, err := CreateWatcher()
if err != nil {
logrus.Errorf("Failed to create image watcher: %v", err)
return
}
logrus.Debugf("Image Watcher created")
defer w.watcher.Close()
if err := w.HandleWatch(filepath.Dir(cfg.Images)); err != nil {
logrus.Errorf("Failed to watch %s: %v", filepath.Dir(cfg.Images), err)
return
}
_, err = os.Stat(cfg.Images)
if err == nil {
if err := w.HandleWatch(cfg.Images); err != nil {
logrus.Errorf("Failed to watch %s: %v", cfg.Images, err)
return
}
if err := w.Populate(cfg.Images); err != nil {
logrus.Errorf("Failed to populate %s files: %v", cfg.Images, err)
return
}
} else if os.IsNotExist(err) {
logrus.Debugf("Image dir %s does not exist", cfg.Images)
} else {
logrus.Debugf("Failed to stat image dir %s: %v", cfg.Images, err)
}
go w.runWorkerForImages(ctx, cfg)
for {
select {
case event, ok := <-w.watcher.Events:
if !ok {
logrus.Info("Image watcher channel closed, shutting down workqueue and retrying in 5 seconds")
w.workqueue.ShutDown()
select {
case <-time.After(time.Second * 5):
go watchImages(ctx, cfg)
return
case <-ctx.Done():
return
}
}
// only queue events for paths under /agent/images
if strings.Contains(event.Name, "/agent/images") {
w.workqueue.AddAfter(event.Name, 2*time.Second)
}
case err, ok := <-w.watcher.Errors:
if !ok {
logrus.Info("Image watcher channel closed, shutting down workqueue and retrying in 5 seconds")
w.workqueue.ShutDown()
select {
case <-time.After(time.Second * 5):
go watchImages(ctx, cfg)
return
case <-ctx.Done():
return
}
}
logrus.Errorf("Image watcher received an error: %v", err)
}
}
}

tests/e2e/autoimport/Vagrantfile

@@ -0,0 +1,104 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['bento/ubuntu-24.04'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
# An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"
scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
if role.include?("server") && role_num == 0
dockerInstall(vm)
vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server "
k3s.config = <<~YAML
token: vagrant
node-external-ip: #{NETWORK_PREFIX}.100
flannel-iface: eth1
cluster-init: true
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
elsif role.include?("server") && role_num != 0
vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server"
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if role.include?("agent")
vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s|
k3s.args = "agent"
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
end
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt, virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
# We replicate the default prefix, but add a timestamp to enable parallel runs and cleanup of old VMs
v.default_prefix = File.basename(Dir.getwd) + "_" + Time.now.to_i.to_s + "_"
end
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
NODE_ROLES.each_with_index do |role, i|
role_num = role.split("-", -1).pop.to_i
config.vm.define role do |node|
provision(node.vm, role, role_num, i)
end
end
end


@@ -0,0 +1,240 @@
package autoimport
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS:
// bento/ubuntu-24.04, opensuse/Leap-15.6.x86_64
// eurolinux-vagrant/rocky-8, eurolinux-vagrant/rocky-9,
var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 0, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
// E2E_REGISTRY: true/false (default: false)
func Test_E2EAutoImport(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Create", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node and Pod Status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
It("Create a folder in agent/images", func() {
cmd := `mkdir /var/lib/rancher/k3s/agent/images`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
})
It("Create file for auto import and search in the image store", func() {
cmd := `echo docker.io/library/redis:latest | sudo tee /var/lib/rancher/k3s/agent/images/testautoimport.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/redis`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})
It("Change name for the file and see if the label is still pinned", func() {
cmd := `mv /var/lib/rancher/k3s/agent/images/testautoimport.txt /var/lib/rancher/k3s/agent/images/testautoimportrename.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/redis`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})
It("Create, remove and create again a file", func() {
cmd := `echo docker.io/library/busybox:latest | sudo tee /var/lib/rancher/k3s/agent/images/bb.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
cmd = `rm /var/lib/rancher/k3s/agent/images/bb.txt`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
cmd = `echo docker.io/library/busybox:latest | sudo tee /var/lib/rancher/k3s/agent/images/bb.txt`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})
It("Move the folder, add a image and then see if the image is going to be pinned", func() {
cmd := `mv /var/lib/rancher/k3s/agent/images /var/lib/rancher/k3s/agent/test`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
cmd = `echo 'docker.io/library/mysql:latest' | sudo tee /var/lib/rancher/k3s/agent/test/mysql.txt`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
cmd = `mv /var/lib/rancher/k3s/agent/test /var/lib/rancher/k3s/agent/images`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/mysql`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})
It("Restarts normally", func() {
errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
Expect(errRestart).NotTo(HaveOccurred(), "Nodes did not restart correctly")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
})
It("Verify bb.txt image and see if are pinned", func() {
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})
It("Removes bb.txt file", func() {
cmd := `rm /var/lib/rancher/k3s/agent/images/bb.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})
It("Restarts normally", func() {
errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
Expect(errRestart).NotTo(HaveOccurred(), "Nodes did not restart correctly")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
})
It("Verify if bb.txt image is unpinned", func() {
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).ShouldNot(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).ShouldNot(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if !failed {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
}
if !failed || *ci {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})