cleanup kubeadm etcd client

pull/564/head
fabriziopandini 2019-01-03 12:21:17 +01:00
parent 60561cddf1
commit 684b80f8b8
3 changed files with 2 additions and 325 deletions


@@ -9,7 +9,6 @@ go_library(
         "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
         "//cmd/kubeadm/app/constants:go_default_library",
         "//cmd/kubeadm/app/util/config:go_default_library",
-        "//cmd/kubeadm/app/util/staticpod:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
@@ -25,7 +24,6 @@ go_test(
     deps = [
         "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
         "//cmd/kubeadm/app/constants:go_default_library",
-        "//cmd/kubeadm/test:go_default_library",
     ],
 )


@@ -34,7 +34,6 @@ import (
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
     "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
-    "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
 )

 // ClusterInterrogator is an interface to get etcd cluster related information
@@ -54,41 +53,6 @@ type Client struct {
     TLS       *tls.Config
 }

-// PodManifestsHaveTLS reads the etcd staticpod manifest from disk and returns false if the TLS flags
-// are missing from the command list. If all the flags are present it returns true.
-func PodManifestsHaveTLS(ManifestDir string) (bool, error) {
-    etcdPodPath := constants.GetStaticPodFilepath(constants.Etcd, ManifestDir)
-    etcdPod, err := staticpod.ReadStaticPodFromDisk(etcdPodPath)
-    if err != nil {
-        return false, errors.Wrap(err, "failed to check if etcd pod implements TLS")
-    }
-
-    tlsFlags := []string{
-        "--cert-file=",
-        "--key-file=",
-        "--trusted-ca-file=",
-        "--client-cert-auth=",
-        "--peer-cert-file=",
-        "--peer-key-file=",
-        "--peer-trusted-ca-file=",
-        "--peer-client-cert-auth=",
-    }
-FlagLoop:
-    for _, flag := range tlsFlags {
-        for _, container := range etcdPod.Spec.Containers {
-            for _, arg := range container.Command {
-                if strings.Contains(arg, flag) {
-                    continue FlagLoop
-                }
-            }
-        }
-        // flag not found in any container
-        return false, nil
-    }
-    // all flags were found in container args; pod fully implements TLS
-    return true, nil
-}
-
 // New creates a new EtcdCluster client
 func New(endpoints []string, ca, cert, key string) (*Client, error) {
     client := Client{Endpoints: endpoints}
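The check removed above boils down to a simple scan: every required TLS flag must appear in the command of at least one container in the manifest. Below is a minimal standalone sketch of that same technique, operating on plain command slices instead of the kubeadm staticpod/Pod types; the function name and inputs are illustrative, not kubeadm API.

package main

import (
    "fmt"
    "strings"
)

// commandsHaveTLSFlags reports whether every required TLS flag appears in at
// least one of the given container commands. It isolates the scan performed
// by the removed PodManifestsHaveTLS, minus the manifest loading.
func commandsHaveTLSFlags(commands [][]string) bool {
    tlsFlags := []string{
        "--cert-file=", "--key-file=", "--trusted-ca-file=", "--client-cert-auth=",
        "--peer-cert-file=", "--peer-key-file=", "--peer-trusted-ca-file=", "--peer-client-cert-auth=",
    }
FlagLoop:
    for _, flag := range tlsFlags {
        for _, command := range commands {
            for _, arg := range command {
                if strings.Contains(arg, flag) {
                    // this flag is present somewhere; move on to the next flag
                    continue FlagLoop
                }
            }
        }
        // the current flag was not found in any command
        return false
    }
    return true
}

func main() {
    insecure := [][]string{{"etcd", "--listen-client-urls=http://127.0.0.1:2379", "--data-dir=/var/lib/etcd"}}
    fmt.Println(commandsHaveTLSFlags(insecure)) // false: no TLS flags at all
}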
@@ -113,53 +77,8 @@ func New(endpoints []string, ca, cert, key string) (*Client, error) {
 // the kubeadm-config ConfigMap in kube-system namespace.
 // Once created, the client synchronizes client's endpoints with the known endpoints from the etcd membership API (reality check).
 func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client, error) {
-    // Kubeadm v1.13 should manage v1.12 clusters and v1.13 clusters
-    // v1.12 clusters can be have etcd listening on localhost only (if the cluster was created with kubeadm v1.12)
-    // or etcd listening on localhost and API server advertise address (if the cluster was created with kubeadm v1.13).
-    // The first case should be dropped in v1.14 when support for v1.12 clusters can be removed from the codebase.
-
-    // Detect which type of etcd we are dealing with
-    // Please note that this test can be executed only on master nodes during upgrades;
-    // For nodes where we are joining a new control plane node instead we should tolerate that the etcd manifest does not
-    // exists and try to connect to etcd using API server advertise address; as described above this will lead to a know isse
-    // for cluster created with v1.12, but a documented workaround will be provided
-    oldManifest := false
-    klog.V(1).Infoln("checking etcd manifest")
-
-    etcdManifestFile := constants.GetStaticPodFilepath(constants.Etcd, constants.GetStaticPodDirectory())
-    etcdPod, err := staticpod.ReadStaticPodFromDisk(etcdManifestFile)
-    if err == nil {
-        etcdContainer := etcdPod.Spec.Containers[0]
-        for _, arg := range etcdContainer.Command {
-            if arg == "--listen-client-urls=https://127.0.0.1:2379" {
-                klog.V(1).Infoln("etcd manifest created by kubeadm v1.12")
-                oldManifest = true
-            }
-        }
-
-        // if etcd is listening on localhost only
-        if oldManifest == true {
-            // etcd cluster has a single member "by design"
-            endpoints := []string{fmt.Sprintf("localhost:%d", constants.EtcdListenClientPort)}
-
-            etcdClient, err := New(
-                endpoints,
-                filepath.Join(certificatesDir, constants.EtcdCACertName),
-                filepath.Join(certificatesDir, constants.EtcdHealthcheckClientCertName),
-                filepath.Join(certificatesDir, constants.EtcdHealthcheckClientKeyName),
-            )
-            if err != nil {
-                return nil, errors.Wrapf(err, "error creating etcd client for %v endpoint", endpoints)
-            }
-
-            return etcdClient, nil
-        }
-    }
-
-    // etcd is listening on localhost and API server advertise address, and
-    // the etcd cluster can have more than one etcd members, so it is necessary to get the
-    // list of endpoints from kubeadm cluster status before connecting
+    // etcd is listening the API server advertise address on each control-plane node
+    // so it is necessary to get the list of endpoints from kubeadm cluster status before connecting

     // Gets the cluster status
     clusterStatus, err := config.GetClusterStatus(client)
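With the old-manifest probing gone, NewFromCluster always builds its endpoint list from the kubeadm cluster status: one etcd client URL per control-plane node, on that node's API server advertise address. Below is a minimal sketch of that derivation, using a plain map of node name to advertise address as a stand-in for the real ClusterStatus type; the helper name and map type are assumptions for illustration.

package main

import "fmt"

// etcdEndpointsFromAdvertiseAddresses builds one etcd client URL per
// control-plane node, assuming etcd serves clients on the node's API server
// advertise address and the standard client port (2379).
func etcdEndpointsFromAdvertiseAddresses(advertiseAddresses map[string]string, clientPort int) []string {
    endpoints := []string{}
    for _, addr := range advertiseAddresses {
        endpoints = append(endpoints, fmt.Sprintf("https://%s:%d", addr, clientPort))
    }
    return endpoints
}

func main() {
    endpoints := etcdEndpointsFromAdvertiseAddresses(map[string]string{
        "cp-0": "10.0.5.5",
        "cp-1": "10.0.5.6",
    }, 2379)
    // Order depends on map iteration; e.g. [https://10.0.5.5:2379 https://10.0.5.6:2379]
    fmt.Println(endpoints)
    // These endpoints, together with the etcd CA and healthcheck client
    // certificate/key from the certificates directory, would then be passed to New.
}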


@@ -18,253 +18,13 @@ package etcd
 import (
     "fmt"
-    "io/ioutil"
-    "os"
-    "path/filepath"
     "strconv"
     "testing"

     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
-    testutil "k8s.io/kubernetes/cmd/kubeadm/test"
 )

-const (
-    secureEtcdPod = `# generated by kubeadm v1.10.0
-apiVersion: v1
-kind: Pod
-metadata:
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ""
-  creationTimestamp: null
-  labels:
-    component: etcd
-    tier: control-plane
-  name: etcd
-  namespace: kube-system
-spec:
-  containers:
-  - command:
-    - etcd
-    - --advertise-client-urls=https://127.0.0.1:2379
-    - --data-dir=/var/lib/etcd
-    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
-    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
-    - --listen-client-urls=https://127.0.0.1:2379
-    - --peer-client-cert-auth=true
-    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
-    - --key-file=/etc/kubernetes/pki/etcd/server.key
-    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
-    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
-    - --client-cert-auth=true
-    image: k8s.gcr.io/etcd-amd64:3.1.12
-    livenessProbe:
-      exec:
-        command:
-        - /bin/sh
-        - -ec
-        - ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
-          --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
-          get foo
-      failureThreshold: 8
-      initialDelaySeconds: 15
-      timeoutSeconds: 15
-    name: etcd
-    resources: {}
-    volumeMounts:
-    - mountPath: /var/lib/etcd
-      name: etcd-data
-    - mountPath: /etc/kubernetes/pki/etcd
-      name: etcd-certs
-  hostNetwork: true
-  volumes:
-  - hostPath:
-      path: /var/lib/etcd
-      type: DirectoryOrCreate
-    name: etcd-data
-  - hostPath:
-      path: /etc/kubernetes/pki/etcd
-      type: DirectoryOrCreate
-    name: etcd-certs
-status: {}
-`
-    secureExposedEtcdPod = `
-apiVersion: v1
-kind: Pod
-metadata:
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ""
-  creationTimestamp: null
-  labels:
-    component: etcd
-    tier: control-plane
-  name: etcd
-  namespace: kube-system
-spec:
-  containers:
-  - command:
-    - etcd
-    - --advertise-client-urls=https://10.0.5.5:2379
-    - --data-dir=/var/lib/etcd
-    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
-    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
-    - --listen-client-urls=https://[::0:0]:2379
-    - --peer-client-cert-auth=true
-    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
-    - --key-file=/etc/kubernetes/pki/etcd/server.key
-    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
-    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
-    - --client-cert-auth=true
-    image: k8s.gcr.io/etcd-amd64:3.1.12
-    livenessProbe:
-      exec:
-        command:
-        - /bin/sh
-        - -ec
-        - ETCDCTL_API=3 etcdctl --endpoints=https://[::1]:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
-          --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
-          get foo
-      failureThreshold: 8
-      initialDelaySeconds: 15
-      timeoutSeconds: 15
-    name: etcd
-    resources: {}
-    volumeMounts:
-    - mountPath: /var/lib/etcd
-      name: etcd-data
-    - mountPath: /etc/kubernetes/pki/etcd
-      name: etcd-certs
-  hostNetwork: true
-  volumes:
-  - hostPath:
-      path: /var/lib/etcd
-      type: DirectoryOrCreate
-    name: etcd-data
-  - hostPath:
-      path: /etc/kubernetes/pki/etcd
-      type: DirectoryOrCreate
-    name: etcd-certs
-status: {}
-`
-    insecureEtcdPod = `# generated by kubeadm v1.9.6
-apiVersion: v1
-kind: Pod
-metadata:
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ""
-  creationTimestamp: null
-  labels:
-    component: etcd
-    tier: control-plane
-  name: etcd
-  namespace: kube-system
-spec:
-  containers:
-  - command:
-    - etcd
-    - --listen-client-urls=http://127.0.0.1:2379
-    - --advertise-client-urls=http://127.0.0.1:2379
-    - --data-dir=/var/lib/etcd
-    image: gcr.io/google_containers/etcd-amd64:3.1.11
-    livenessProbe:
-      failureThreshold: 8
-      httpGet:
-        host: 127.0.0.1
-        path: /health
-        port: 2379
-        scheme: HTTP
-      initialDelaySeconds: 15
-      timeoutSeconds: 15
-    name: etcd
-    resources: {}
-    volumeMounts:
-    - mountPath: /var/lib/etcd
-      name: etcd
-  hostNetwork: true
-  volumes:
-  - hostPath:
-      path: /var/lib/etcd
-      type: DirectoryOrCreate
-    name: etcd
-status: {}
-`
-    invalidPod = `---{ broken yaml @@@`
-)
-
-func TestPodManifestHasTLS(t *testing.T) {
-    tests := []struct {
-        description   string
-        podYaml       string
-        hasTLS        bool
-        expectErr     bool
-        writeManifest bool
-    }{
-        {
-            description:   "secure etcd returns true",
-            podYaml:       secureEtcdPod,
-            hasTLS:        true,
-            writeManifest: true,
-            expectErr:     false,
-        },
-        {
-            description:   "secure exposed etcd returns true",
-            podYaml:       secureExposedEtcdPod,
-            hasTLS:        true,
-            writeManifest: true,
-            expectErr:     false,
-        },
-        {
-            description:   "insecure etcd returns false",
-            podYaml:       insecureEtcdPod,
-            hasTLS:        false,
-            writeManifest: true,
-            expectErr:     false,
-        },
-        {
-            description:   "invalid pod fails to unmarshal",
-            podYaml:       invalidPod,
-            hasTLS:        false,
-            writeManifest: true,
-            expectErr:     true,
-        },
-        {
-            description:   "non-existent file returns error",
-            podYaml:       ``,
-            hasTLS:        false,
-            writeManifest: false,
-            expectErr:     true,
-        },
-    }
-
-    for _, rt := range tests {
-        tmpdir := testutil.SetupTempDir(t)
-        defer os.RemoveAll(tmpdir)
-
-        manifestPath := filepath.Join(tmpdir, "etcd.yaml")
-        if rt.writeManifest {
-            err := ioutil.WriteFile(manifestPath, []byte(rt.podYaml), 0644)
-            if err != nil {
-                t.Fatalf("Failed to write pod manifest\n%s\n\tfatal error: %v", rt.description, err)
-            }
-        }
-
-        hasTLS, actualErr := PodManifestsHaveTLS(tmpdir)
-        if (actualErr != nil) != rt.expectErr {
-            t.Errorf(
-                "PodManifestHasTLS failed\n%s\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
-                rt.description,
-                rt.expectErr,
-                (actualErr != nil),
-                actualErr,
-            )
-        }
-        if hasTLS != rt.hasTLS {
-            t.Errorf("PodManifestHasTLS failed\n%s\n\texpected hasTLS: %t\n\tgot: %t", rt.description, rt.hasTLS, hasTLS)
-        }
-    }
-}
 func TestCheckConfigurationIsHA(t *testing.T) {
     var tests = []struct {
         name string