Allow node IP to be passed as optional config for kubelet

In case of multiple IPs on the node, this allows the admin to
specify the desired IP to be used for the node.
Ravi Sankar Penta 2015-12-09 18:05:35 -08:00
parent ac7f85046a
commit d0dd6c844b
3 changed files with 95 additions and 2 deletions
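With this change an admin on a multi-homed node can pin the address the kubelet registers for the node. An illustrative invocation (the address is a placeholder; per the validation added below it must be a non-loopback IPv4 address assigned to the host, and all other kubelet flags are omitted):

kubelet --node-ip=10.0.0.2 ...

If --node-ip is left unset, address selection keeps the existing hostname-based behavior.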


@@ -158,6 +158,7 @@ type KubeletServer struct {
 	// Pull images one at a time.
 	SerializeImagePulls bool
 	ExperimentalFlannelOverlay bool
+	NodeIP net.IP
 }
 // bootstrapping interface for kubelet, targets the initialization protocol
@@ -349,6 +350,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
 	fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")
 	fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
+	fs.IPVar(&s.NodeIP, "node-ip", s.NodeIP, "IP address of the node. If set, kubelet will use this IP address for the node")
 }
 // UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
@@ -488,6 +490,7 @@ func (s *KubeletServer) UnsecuredKubeletConfig() (*KubeletConfig, error) {
 		VolumePlugins: ProbeVolumePlugins(s.VolumePluginDir),
 		ExperimentalFlannelOverlay: s.ExperimentalFlannelOverlay,
+		NodeIP: s.NodeIP,
 	}, nil
 }
@@ -964,6 +967,7 @@ type KubeletConfig struct {
 	VolumePlugins []volume.VolumePlugin
 	ExperimentalFlannelOverlay bool
+	NodeIP net.IP
 }
 func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -1047,6 +1051,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 		kc.SerializeImagePulls,
 		kc.ContainerManager,
 		kc.ExperimentalFlannelOverlay,
+		kc.NodeIP,
 	)
 	if err != nil {
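For reference, the flag above is registered through pflag's IP-typed variable binding, so malformed values are rejected at flag-parse time, before any of the kubelet's own validation runs. A minimal standalone sketch of that mechanism (hypothetical package main; only the "node-ip" flag name and help text come from this diff):

package main

import (
	"fmt"
	"net"

	flag "github.com/spf13/pflag"
)

func main() {
	// Bind an IP-valued flag, mirroring fs.IPVar in KubeletServer.AddFlags.
	var nodeIP net.IP
	flag.IPVar(&nodeIP, "node-ip", nil, "IP address of the node")
	flag.Parse()

	if nodeIP == nil {
		fmt.Println("--node-ip not set; hostname-based address detection stays in effect")
		return
	}
	// pflag guarantees a syntactically valid IP here; the semantic checks
	// (IPv4, non-loopback, assigned to this host) come later in validateNodeIP.
	fmt.Printf("--node-ip parsed as %s\n", nodeIP)
}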


@@ -197,6 +197,7 @@ func NewMainKubelet(
 	serializeImagePulls bool,
 	containerManager cm.ContainerManager,
 	flannelExperimentalOverlay bool,
+	nodeIP net.IP,
 ) (*Kubelet, error) {
 	if rootDirectory == "" {
@@ -313,10 +314,17 @@ func NewMainKubelet(
 		containerManager: containerManager,
 		flannelExperimentalOverlay: flannelExperimentalOverlay,
 		flannelHelper: NewFlannelHelper(),
+		nodeIP: nodeIP,
 	}
 	if klet.flannelExperimentalOverlay {
 		glog.Infof("Flannel is in charge of podCIDR and overlay networking.")
 	}
+	if klet.nodeIP != nil {
+		if err := klet.validateNodeIP(); err != nil {
+			return nil, err
+		}
+		glog.Infof("Using node IP: %q", klet.nodeIP.String())
+	}
 	if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}); err != nil {
 		return nil, err
 	} else {
@@ -643,6 +651,42 @@ type Kubelet struct {
 	// on the fly if we're confident the dbus connetions it opens doesn't
 	// put the system under duress.
 	flannelHelper *FlannelHelper
+	// If non-nil, use this IP address for the node
+	nodeIP net.IP
 }
+// Validate given node IP belongs to the current host
+func (kl *Kubelet) validateNodeIP() error {
+	if kl.nodeIP == nil {
+		return nil
+	}
+	// Honor IP limitations set in setNodeStatus()
+	if kl.nodeIP.IsLoopback() {
+		return fmt.Errorf("nodeIP can't be loopback address")
+	}
+	if kl.nodeIP.To4() == nil {
+		return fmt.Errorf("nodeIP must be IPv4 address")
+	}
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return err
+	}
+	for _, addr := range addrs {
+		var ip net.IP
+		switch v := addr.(type) {
+		case *net.IPNet:
+			ip = v.IP
+		case *net.IPAddr:
+			ip = v.IP
+		}
+		if ip != nil && ip.Equal(kl.nodeIP) {
+			return nil
+		}
+	}
+	return fmt.Errorf("Node IP: %q not found in the host's network interfaces", kl.nodeIP.String())
+}
 func (kl *Kubelet) allSourcesReady() bool {
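validateNodeIP combines three checks: not loopback, IPv4, and actually assigned to one of the host's interfaces. The short standalone sketch below (illustrative only, not part of this commit) inverts the same checks to print the --node-ip values this host would accept:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Enumerate every address configured on the host's interfaces,
	// the same source of truth validateNodeIP walks.
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet: // the usual concrete type from InterfaceAddrs
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		// Apply the same restrictions validateNodeIP enforces:
		// non-nil, not loopback, and IPv4.
		if ip == nil || ip.IsLoopback() || ip.To4() == nil {
			continue
		}
		fmt.Printf("acceptable --node-ip value: %s\n", ip)
	}
}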
@@ -2663,8 +2707,12 @@ func (kl *Kubelet) setNodeAddress(node *api.Node) error {
 		}
 		node.Status.Addresses = nodeAddresses
 	} else {
-		addr := net.ParseIP(kl.hostname)
-		if addr != nil {
+		if kl.nodeIP != nil {
+			node.Status.Addresses = []api.NodeAddress{
+				{Type: api.NodeLegacyHostIP, Address: kl.nodeIP.String()},
+				{Type: api.NodeInternalIP, Address: kl.nodeIP.String()},
+			}
+		} else if addr := net.ParseIP(kl.hostname); addr != nil {
 			node.Status.Addresses = []api.NodeAddress{
 				{Type: api.NodeLegacyHostIP, Address: addr.String()},
 				{Type: api.NodeInternalIP, Address: addr.String()},

@@ -787,6 +787,46 @@ func TestGetContainerInfoWithNoContainers(t *testing.T) {
 	mockCadvisor.AssertExpectations(t)
 }
+func TestNodeIPParam(t *testing.T) {
+	testKubelet := newTestKubelet(t)
+	kubelet := testKubelet.kubelet
+	tests := []struct {
+		nodeIP   string
+		success  bool
+		testName string
+	}{
+		{
+			nodeIP:   "",
+			success:  true,
+			testName: "IP not set",
+		},
+		{
+			nodeIP:   "127.0.0.1",
+			success:  false,
+			testName: "loopback address",
+		},
+		{
+			nodeIP:   "FE80::0202:B3FF:FE1E:8329",
+			success:  false,
+			testName: "IPv6 address",
+		},
+		{
+			nodeIP:   "1.2.3.4",
+			success:  false,
+			testName: "IPv4 address that doesn't belong to host",
+		},
+	}
+	for _, test := range tests {
+		kubelet.nodeIP = net.ParseIP(test.nodeIP)
+		err := kubelet.validateNodeIP()
+		if err != nil && test.success {
+			t.Errorf("Test: %s, expected no error but got: %v", test.testName, err)
+		} else if err == nil && !test.success {
+			t.Errorf("Test: %s, expected an error", test.testName)
+		}
+	}
+}
 func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	fakeRuntime := testKubelet.fakeRuntime
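Note that the table in TestNodeIPParam has no positive case with a concrete address, since the set of host IPs differs per machine. A hypothetical companion test (not part of this commit) could derive an address from the host at runtime to cover the success path; newTestKubelet is the same helper used above:

// Hypothetical: exercise the success path of validateNodeIP with an IPv4
// address the current host actually owns.
func TestNodeIPParamAcceptsHostIP(t *testing.T) {
	testKubelet := newTestKubelet(t)
	kubelet := testKubelet.kubelet

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		t.Fatalf("InterfaceAddrs() failed: %v", err)
	}
	for _, addr := range addrs {
		ipNet, ok := addr.(*net.IPNet)
		if !ok || ipNet.IP.IsLoopback() || ipNet.IP.To4() == nil {
			continue
		}
		kubelet.nodeIP = ipNet.IP
		if err := kubelet.validateNodeIP(); err != nil {
			t.Errorf("expected host IP %q to be accepted, got: %v", ipNet.IP, err)
		}
		return
	}
	t.Skip("no non-loopback IPv4 address found on this host")
}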