Merge pull request #54300 from jsafrane/fix-test-images

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix test images

These commits fix volume_io tests for iSCSI and Ceph RBD. Both server images were quite old (Fedora 22), so I updated them to ~~something more stable (CentOS 7) and to newer Ceph (Jewel, 10.2.7).~~ something newer (Fedora 26).

The most important fix is that the test volumes now have 120 MB, so the volume_io tests can actually run: the tests write a 100 MB file to the volume to check its persistence.
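For reference, the updated `create_block.sh` scripts create the backing device as a sparse file, roughly like this (a simplified sketch of the commands in the diff below):

```sh
# Seek 120MB into the file and write a single 1MB block; the result is a
# sparse ~121MB file that costs almost nothing in the image.
dd if=/dev/zero of=block seek=120 count=1 bs=1M
# Format it with ext2 so the tests get a mountable filesystem.
mkfs.ext2 block
```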

Once the mount containers in #53440 are merged, I'll try to run these tests regularly with every PR (or merge) so we catch regressions quickly.

```release-note
NONE
```

/sig testing
/sig storage

/assign @jeffvance 

Fixes: #56725
Kubernetes Submit Queue 2018-03-19 20:34:22 -07:00 committed by GitHub
commit 37b2edd855
19 changed files with 99 additions and 33 deletions

View File

@@ -67,6 +67,10 @@ const (
// Waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupSleep = 20 * time.Second
// Waiting period for pod to be cleaned up and unmount its volumes so we
// don't tear down containers with NFS/Ceph/Gluster server too early.
PodCleanupTimeout = 20 * time.Second
)
// Configuration of one test. The test consists of:
@@ -351,8 +355,8 @@ func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
}
// See issue #24100.
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
By("sleeping a bit so client can stop and unmount")
time.Sleep(20 * time.Second)
By("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(PodCleanupTimeout)
err = podClient.Delete(config.Prefix+"-server", nil)
if err != nil {

View File

@@ -33,6 +33,7 @@ import (
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -64,6 +65,7 @@ var md5hashes = map[int64]string{
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -110,7 +112,8 @@ func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc
},
},
},
SecurityContext: podSecContext,
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
@@ -209,6 +212,9 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
@@ -416,7 +422,7 @@ var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
Name: name,
},
FSType: "ext2",
ReadOnly: true,
ReadOnly: false,
},
}
})

View File

@@ -261,7 +261,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "cephfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeCephServer),
ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
ServerPorts: []int{6789},
}
@@ -279,10 +279,9 @@ var _ = utils.SIGDescribe("Volumes", func() {
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-secret",
},
// Must use the ceph keyring at contrib/for-tests/volumes-ceph/ceph/init.sh
// and encode in base64
// from test/images/volumes-tester/rbd/keyring
Data: map[string][]byte{
"key": []byte("AQAMgXhVwBCeDhAA9nlPaFyfUSatGD4drFWDvQ=="),
"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
},
Type: "kubernetes.io/cephfs",
}
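For reference, that key is taken straight from the keyring file shipped in the server image; a sketch of extracting it, assuming the standard Ceph `key = ...` keyring layout:

```sh
# Print the key from the keyring referenced in the comment above; the printed
# value is what the Secret's "key" field stores verbatim.
awk '/^[[:space:]]*key[[:space:]]*=/ { print $3 }' test/images/volumes-tester/rbd/keyring
```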

View File

@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM centos
RUN yum -y install hostname centos-release-gluster && yum -y install glusterfs-server && yum clean all
FROM fedora:26
RUN yum -y install hostname glusterfs-server && yum clean all
ADD glusterd.vol /etc/glusterfs/
ADD run_gluster.sh /usr/local/bin/
ADD index.html /vol/

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
TAG = 0.4
TAG = 0.5
PREFIX = staging-k8s.gcr.io
all: push

View File

@@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM fedora
RUN yum install -y iscsi-initiator-utils targetcli net-tools strace && yum clean all
FROM fedora:26
RUN yum install -y iscsi-initiator-utils targetcli net-tools strace procps-ng psmisc && yum clean all
ADD run_iscsid.sh /usr/local/bin/
ADD initiatorname.iscsi /etc/iscsi/
ADD block.tar.gz /

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
TAG = 0.1
TAG = 0.2
PREFIX = staging-k8s.gcr.io
all: push

View File

@@ -33,8 +33,9 @@ cleanup()
trap cleanup TERM EXIT
# Create 1MB device with ext2
dd if=/dev/zero of=block count=1 bs=1M
# Create 120MB device with ext2
# (volume_io tests need at least 100MB)
dd if=/dev/zero of=block seek=120 count=1 bs=1M
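# The result is a sparse 121MB file (120MB skipped + 1MB written), which
# matches the 126877696-byte (121MiB) fileio size in saveconfig.json.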
mkfs.ext2 block
# Add index.html to it

View File

@@ -16,16 +16,26 @@
function start()
{
# targetcli needs dbus
mkdir /run/dbus
dbus-daemon --system
# clear any previous configuration
targetcli clearconfig confirm=True
# restore configuration from saveconfig.json
targetcli restoreconfig
iscsid
# maximum log level
iscsid -f -d 8
echo "iscsid started"
}
function stop()
{
echo "Stopping iscsid"
kill $( cat /var/run/iscsid.pid )
killall iscsid
targetcli clearconfig confirm=True
echo "iscsid stopped"

View File

@@ -32,7 +32,7 @@
"dev": "block",
"name": "block",
"plugin": "fileio",
"size": 1048576,
"size": 126877696,
"write_back": true,
"wwn": "521c57aa-9d9b-4e5d-ab1a-527487f92a33"
}

View File

@@ -15,16 +15,17 @@
# CEPH all in one
# Based on image by Ricardo Rocha, ricardo@catalyst.net.nz
FROM fedora
FROM fedora:26
# Base Packages
RUN yum install -y wget ceph ceph-fuse strace && yum clean all
RUN yum install -y wget strace psmisc procps-ng ceph ceph-fuse && yum clean all
# Get ports exposed
EXPOSE 6789
ADD ./bootstrap.sh /bootstrap.sh
ADD ./mon.sh /mon.sh
ADD ./mds.sh /mds.sh
ADD ./osd.sh /osd.sh
ADD ./ceph.conf.sh /ceph.conf.sh
ADD ./keyring /var/lib/ceph/mon/keyring

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
TAG = 0.1
TAG = 0.2
PREFIX = staging-k8s.gcr.io
all: push

View File

@@ -35,9 +35,29 @@ mkdir -p /var/lib/ceph/osd/ceph-0 /var/lib/ceph/osd/ceph-1
sh ./osd.sh 0
sh ./osd.sh 1
# Prepare a RBD volume
# NOTE: we need Ceph kernel modules on the host!
rbd import block foo
# Configure and start cephfs metadata server
sh ./mds.sh
# Prepare an RBD volume "foo" (with only the layering feature; the others may
# require newer clients).
# NOTE: we need Ceph kernel modules on the host that runs the client!
rbd import --image-feature layering block foo
# Prepare a cephfs volume
ceph osd pool create cephfs_data 4
ceph osd pool create cephfs_metadata 4
ceph fs new cephfs cephfs_metadata cephfs_data
# Put index.html into the volume
# It takes a while until the volume created above is mountable;
# 1 second is usually enough, but retry indefinitely.
sleep 1
while ! ceph-fuse -m `hostname -i`:6789 /mnt; do
echo "Waiting for cephfs to be up"
sleep 1
done
echo "Hello Ceph!" > /mnt/index.html
chmod 644 /mnt/index.html
umount /mnt
echo "Ceph is ready"

View File

@@ -29,9 +29,13 @@ host = cephbox
mon addr = $1
[osd]
osd journal size = 128
osd journal size = 128
journal dio = false
# allow running on ext4
osd max object name len = 256
osd max object namespace len = 64
[osd.0]
osd host = cephbox
" > /etc/ceph/ceph.conf

View File

@@ -36,8 +36,9 @@ cleanup()
trap cleanup TERM EXIT
# Create 1MB device with ext2
dd if=/dev/zero of=block count=1 bs=1M
# Create 120MB device with ext2
# (volume_io tests need at least 100MB)
dd if=/dev/zero of=block seek=120 count=1 bs=1M
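# The result is a sparse 121MB file; bootstrap.sh imports it into the cluster
# with "rbd import", so the image is big enough for the 100MB test file.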
mkfs.ext2 block
# Add index.html to it

View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Configures Ceph Metadata Service (mds), needed by CephFS
#
ceph-mds -i cephfs -c /etc/ceph/ceph.conf

View File

@@ -94,10 +94,9 @@ var (
ServeHostname = ImageConfig{e2eRegistry, "serve-hostname", "1.0", true}
TestWebserver = ImageConfig{e2eRegistry, "test-webserver", "1.0", true}
VolumeNFSServer = ImageConfig{gcRegistry, "volume-nfs", "0.8", false}
VolumeISCSIServer = ImageConfig{gcRegistry, "volume-icsci", "0.1", false}
VolumeGlusterServer = ImageConfig{gcRegistry, "volume-gluster", "0.2", false}
VolumeCephServer = ImageConfig{gcRegistry, "volume-ceph", "0.1", false}
VolumeRBDServer = ImageConfig{gcRegistry, "volume-rbd", "0.1", false}
VolumeISCSIServer = ImageConfig{gcRegistry, "volume-iscsi", "0.2", false}
VolumeGlusterServer = ImageConfig{gcRegistry, "volume-gluster", "0.5", false}
VolumeRBDServer = ImageConfig{gcRegistry, "volume-rbd", "0.2", false}
)
func GetE2EImage(image ImageConfig) string {