mirror of https://github.com/k3s-io/k3s
Replace k3s cloud provider wrangler controller with core node informer (#2843)
* Replace k3s cloud provider wrangler controller with core node informer

Upstream k8s has exposed an interface for cloud providers to access the cloud controller manager's node cache and shared informer since Kubernetes 1.9. This is used by all the other in-tree cloud providers; we should use it too, instead of running a dedicated wrangler controller. Doing so also appears to fix an intermittent issue with the uninitialized taint not getting cleared on nodes in CI.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>

pull/2862/head
parent fd991cb964
commit f152f656a0
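For context, the upstream hook the commit message refers to works roughly as sketched below. This is a paraphrase of the k8s.io/cloud-provider contract, not part of this diff; the package and function names are illustrative.

// Paraphrased sketch of the upstream mechanism (k8s.io/cloud-provider), not
// part of this diff: the cloud controller manager type-asserts the registered
// provider and hands over its shared informer factory before starting the
// cloud controllers.
package example

import (
	"k8s.io/client-go/informers"
	cloudprovider "k8s.io/cloud-provider"
)

// giveInformersToCloud mirrors what the CCM does after building its
// SharedInformerFactory; the function name here is illustrative.
func giveInformersToCloud(cloud cloudprovider.Interface, factory informers.SharedInformerFactory) {
	if informerUser, ok := cloud.(cloudprovider.InformerUser); ok {
		// Providers that implement InformerUser, as k3s now does,
		// receive the factory; all others are simply skipped.
		informerUser.SetInformers(factory)
	}
}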
pkg/cloudprovider/cloudprovider.go

@@ -1,20 +1,23 @@
 package cloudprovider
 
 import (
-	"context"
 	"io"
 
 	"github.com/rancher/k3s/pkg/version"
-	"github.com/rancher/wrangler-api/pkg/generated/controllers/core"
-	coreclient "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
-	"github.com/rancher/wrangler/pkg/start"
+	"k8s.io/client-go/informers"
+	informercorev1 "k8s.io/client-go/informers/core/v1"
+	"k8s.io/client-go/tools/cache"
 	cloudprovider "k8s.io/cloud-provider"
 )
 
 type k3s struct {
-	NodeCache coreclient.NodeCache
+	nodeInformer          informercorev1.NodeInformer
+	nodeInformerHasSynced cache.InformerSynced
 }
 
 var _ cloudprovider.Interface = &k3s{}
+var _ cloudprovider.InformerUser = &k3s{}
 
 func init() {
 	cloudprovider.RegisterCloudProvider(version.Program, func(config io.Reader) (cloudprovider.Interface, error) {
 		return &k3s{}, nil
@@ -22,11 +25,11 @@ func init() {
 }
 
 func (k *k3s) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
-	coreFactory := core.NewFactoryFromConfigOrDie(clientBuilder.ConfigOrDie("cloud-controller-manager"))
-
-	go start.All(context.Background(), 1, coreFactory)
-
-	k.NodeCache = coreFactory.Core().V1().Node().Cache()
+}
+
+func (k *k3s) SetInformers(informerFactory informers.SharedInformerFactory) {
+	k.nodeInformer = informerFactory.Core().V1().Nodes()
+	k.nodeInformerHasSynced = k.nodeInformer.Informer().HasSynced
 }
 
 func (k *k3s) Instances() (cloudprovider.Instances, bool) {
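With this change the provider no longer starts anything itself: Initialize becomes empty, and SetInformers only records the shared node informer and its HasSynced function, while the cloud controller manager owns and starts the factory. A minimal sketch of that lifecycle, exercised against a fake clientset (the fake client, stop channel, and main function are illustrative test scaffolding, not part of this commit):

// Illustrative scaffolding, not part of the commit: shows the informer
// lifecycle that the new SetInformers participates in.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A fake clientset stands in for the CCM's real kubeconfig-backed client.
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)

	// What the new SetInformers does: record the informer and HasSynced.
	nodeInformer := factory.Core().V1().Nodes()
	hasSynced := nodeInformer.Informer().HasSynced

	// The CCM, not the provider, starts the factory and waits for the caches.
	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	if !cache.WaitForCacheSync(stop, hasSynced) {
		fmt.Println("node informer never synced")
		return
	}

	// Once synced, lookups go through the shared lister rather than a
	// separate wrangler-managed cache.
	nodes, err := nodeInformer.Lister().List(labels.Everything())
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Printf("nodes in cache: %d\n", len(nodes))
}

One consequence of this design is that the provider shares a single watch and cache with every other controller in the CCM process, instead of opening a second watch on nodes through wrangler.

pkg/cloudprovider/instances.go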
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/pkg/errors"
 	"github.com/rancher/k3s/pkg/version"
 	"github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
@@ -30,7 +31,11 @@ func (k *k3s) InstanceExistsByProviderID(ctx context.Context, providerID string)
 }
 
 func (k *k3s) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
-	_, err := k.NodeCache.Get(string(nodeName))
+	if k.nodeInformerHasSynced == nil || !k.nodeInformerHasSynced() {
+		return "", errors.New("Node informer has not synced yet")
+	}
+
+	_, err := k.nodeInformer.Lister().Get(string(nodeName))
 	if err != nil {
 		return "", fmt.Errorf("Failed to find node %s: %v", nodeName, err)
 	}
@@ -55,7 +60,11 @@ func (k *k3s) InstanceTypeByProviderID(ctx context.Context, providerID string) (
 
 func (k *k3s) NodeAddresses(ctx context.Context, name types.NodeName) ([]corev1.NodeAddress, error) {
 	addresses := []corev1.NodeAddress{}
-	node, err := k.NodeCache.Get(string(name))
+	if k.nodeInformerHasSynced == nil || !k.nodeInformerHasSynced() {
+		return nil, errors.New("Node informer has not synced yet")
+	}
+
+	node, err := k.nodeInformer.Lister().Get(string(name))
 	if err != nil {
 		return nil, fmt.Errorf("Failed to find node %s: %v", name, err)
 	}
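Both call sites now share the same guard, and for good reason: a lister answers from a local cache that is simply empty before the first List/Watch completes, so until HasSynced reports true a cache miss is indistinguishable from a deleted node. A hypothetical helper distilling that pattern (the name getNode does not appear in this commit; it assumes the same package and imports as the hunks above):

// Hypothetical helper, not in the commit: the guard + lookup pattern shared
// by InstanceID and NodeAddresses above.
func (k *k3s) getNode(name string) (*corev1.Node, error) {
	// Fail fast while the cache is still warming up, instead of letting
	// the cloud controller act on incomplete data.
	if k.nodeInformerHasSynced == nil || !k.nodeInformerHasSynced() {
		return nil, errors.New("Node informer has not synced yet")
	}
	return k.nodeInformer.Lister().Get(name)
}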