remove redundant vault api retry logic (#16143)

We upgraded the Vault API module to a version that has built-in retry
logic, so this code is no longer necessary.
Also mention in the comments that the provider can be re-configured.
John Eikenberry authored 2 years ago; committed by GitHub
parent 1e7e52e3ef
commit ed7367b6f4
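
For context on the "built-in retry logic" mentioned above: from github.com/hashicorp/vault/api v1.1.1 on, the LifetimeWatcher backs off between failed renewal attempts on its own. A minimal sketch of the pattern the simplified loop relies on (illustrative names, not the provider's actual code):

package main

import (
	"log"

	vaultapi "github.com/hashicorp/vault/api"
)

// watchToken keeps a secret renewed until it expires or becomes
// non-renewable. The LifetimeWatcher performs backoff between renewal
// attempts internally, so no wrapper retrier is needed.
func watchToken(client *vaultapi.Client, secret *vaultapi.Secret) error {
	watcher, err := client.NewLifetimeWatcher(&vaultapi.LifetimeWatcherInput{
		Secret: secret,
	})
	if err != nil {
		return err
	}

	go watcher.Start()
	defer watcher.Stop()

	for {
		select {
		case err := <-watcher.DoneCh():
			// Renewal has stopped for good.
			return err
		case renewal := <-watcher.RenewCh():
			log.Printf("renewed at %s", renewal.RenewedAt)
		}
	}
}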

@@ -21,7 +21,6 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/lib/decode"
-	"github.com/hashicorp/consul/lib/retry"
 )
 
 const (
@@ -46,14 +45,12 @@ const (
 	VaultAuthMethodTypeUserpass = "userpass"
 
 	defaultK8SServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
-
-	retryMin    = 1 * time.Second
-	retryMax    = 5 * time.Second
-	retryJitter = 20
 )
 
-var ErrBackendNotMounted = fmt.Errorf("backend not mounted")
-var ErrBackendNotInitialized = fmt.Errorf("backend not initialized")
+var (
+	ErrBackendNotMounted     = fmt.Errorf("backend not mounted")
+	ErrBackendNotInitialized = fmt.Errorf("backend not initialized")
+)
 
 type VaultProvider struct {
 	config *structs.VaultCAProviderConfig
@@ -100,6 +97,7 @@ func vaultTLSConfig(config *structs.VaultCAProviderConfig) *vaultapi.TLSConfig {
 }
 
 // Configure sets up the provider using the given configuration.
+// Configure supports being called multiple times to re-configure the provider.
 func (v *VaultProvider) Configure(cfg ProviderConfig) error {
 	config, err := ParseVaultCAConfig(cfg.RawConfig)
 	if err != nil {
@@ -176,6 +174,7 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error {
 	ctx, cancel := context.WithCancel(context.Background())
 	if v.stopWatcher != nil {
+		// stop the running watcher loop if we are re-configuring
 		v.stopWatcher()
 	}
 	v.stopWatcher = cancel
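
The comment added above documents a pattern worth spelling out: Configure stores the cancel function of the watcher goroutine it starts, so a later call can stop the old renewal loop before starting a new one. A simplified sketch of that pattern (hypothetical type and method names, not the provider's actual code):

import "context"

// provider is a simplified stand-in for VaultProvider.
type provider struct {
	stopWatcher func()
}

func (p *provider) configure() {
	ctx, cancel := context.WithCancel(context.Background())
	if p.stopWatcher != nil {
		// Stop the renewal loop started by the previous configure call.
		p.stopWatcher()
	}
	p.stopWatcher = cancel
	go p.runWatcher(ctx)
}

// runWatcher stands in for the renewal loop; it exits when ctx is done.
func (p *provider) runWatcher(ctx context.Context) {
	<-ctx.Done()
}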
@@ -225,33 +224,17 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.LifetimeWatcher) {
 	go watcher.Start()
 	defer watcher.Stop()
 
-	// TODO: Once we've upgraded to a later version of protobuf we can upgrade to github.com/hashicorp/vault/api@1.1.1
-	// or later and rip this out.
-	retrier := retry.Waiter{
-		MinFailures: 5,
-		MinWait:     retryMin,
-		MaxWait:     retryMax,
-		Jitter:      retry.NewJitter(retryJitter),
-	}
-
 	for {
 		select {
 		case <-ctx.Done():
 			return
 
 		case err := <-watcher.DoneCh():
-			// In the event we fail to login to Vault or our token is no longer valid we can overwhelm a Vault instance
-			// with rate limit configured. We would make these requests to Vault as fast as we possibly could and start
-			// causing all client's to receive 429 response codes. To mitigate that we're sleeping 1 second or less
-			// before moving on to login again and restart the lifetime watcher. Once we can upgrade to
-			// github.com/hashicorp/vault/api@v1.1.1 or later the LifetimeWatcher _should_ perform that backoff for us.
+			// Watcher has stopped
 			if err != nil {
 				v.logger.Error("Error renewing token for Vault provider", "error", err)
 			}
-			// wait at least 1 second after returning from the lifetime watcher
-			retrier.Wait(ctx)
 
 			// If the watcher has exited and auth method is enabled,
 			// re-authenticate using the auth method and set up a new watcher.
 			if v.config.AuthMethod != nil {
@@ -259,7 +242,7 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.LifetimeWatcher) {
 				loginResp, err := vaultLogin(v.client, v.config.AuthMethod)
 				if err != nil {
 					v.logger.Error("Error login in to Vault with %q auth method", v.config.AuthMethod.Type)
-
+					// Restart the watcher
 					go watcher.Start()
 					continue
 				}
@@ -279,12 +262,10 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.LifetimeWatcher) {
 					continue
 				}
 			}
-
+			// Restart the watcher.
 			go watcher.Start()
 
 		case <-watcher.RenewCh():
-			retrier.Reset()
-
 			v.logger.Info("Successfully renewed token for Vault provider")
 		}
 	}
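
For reference, the deleted retrier came from Consul's lib/retry package: it sleeps an exponentially growing, capped interval between failures (with jitter) and is reset after a successful renewal. A rough behavioral sketch, simplified and with jitter omitted, not the actual lib/retry implementation:

import (
	"context"
	"time"
)

// waiter mimics the backoff behavior the removed retry.Waiter provided.
type waiter struct {
	minWait, maxWait time.Duration
	failures         uint
}

// wait sleeps for an exponentially growing delay capped at maxWait, or
// returns early when ctx is canceled. (Jitter omitted for brevity.)
func (w *waiter) wait(ctx context.Context) error {
	delay := w.minWait << w.failures
	if delay <= 0 || delay > w.maxWait {
		delay = w.maxWait
	}
	w.failures++
	select {
	case <-time.After(delay):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// reset clears the failure count after a successful renewal, matching
// the retrier.Reset() call removed from the RenewCh case.
func (w *waiter) reset() { w.failures = 0 }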
@@ -430,7 +411,6 @@ func (v *VaultProvider) setupIntermediatePKIPath() error {
 				"error", err,
 			)
-		}
 	}
 }

@@ -274,7 +274,7 @@ func TestVaultCAProvider_RenewToken(t *testing.T) {
 	firstRenewal, err := secret.Data["last_renewal_time"].(json.Number).Int64()
 	require.NoError(t, err)
 
-	// Wait past the TTL and make sure the token has been renewed.
+	// Retry past the TTL and make sure the token has been renewed.
 	retry.Run(t, func(r *retry.R) {
 		secret, err = testVault.client.Auth().Token().Lookup(providerToken)
 		require.NoError(r, err)
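
The test relies on Consul's sdk/testutil/retry helper: retry.Run re-invokes the function, waiting between attempts, until assertions recorded on *retry.R stop failing or the retryer gives up. A minimal usage sketch (tokenIsRenewed is a hypothetical stand-in for the last_renewal_time lookup the real test performs):

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

// tokenIsRenewed is a hypothetical stand-in for looking up the token's
// last_renewal_time, as the real test does.
func tokenIsRenewed() bool { return true }

func TestTokenEventuallyRenewed(t *testing.T) {
	retry.Run(t, func(r *retry.R) {
		// Failed assertions on r make retry.Run wait and try again.
		require.True(r, tokenIsRenewed(), "token not renewed yet")
	})
}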
