Merge pull request #62297 from verult/gce-disks-refactor

Automatic merge from submit-queue (batch tested with PRs 63386, 64624, 62297, 64847). If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

Refactoring GCE Disk APIs to use generated client

**What this PR does / why we need it**: Improves maintainability and testing of GCE disks code.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #62296

Note that `gen.go` and `gen_test.go` are generated files.

I'm planning to do a more extensive refactor that takes advantage of the generated cloud provider mocks, but that'll be in a separate PR and will be a larger change.

/cc @davidz627
/assign @saad-ali 
/release-note-none
/sig storage
pull/8/head
Kubernetes Submit Queue 2018-06-06 19:56:29 -07:00 committed by GitHub
commit b4c206bd4a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 415 additions and 1038 deletions

View File

@ -31,7 +31,6 @@ go_library(
"gce_loadbalancer_internal.go",
"gce_loadbalancer_naming.go",
"gce_networkendpointgroup.go",
"gce_op.go",
"gce_routes.go",
"gce_securitypolicy.go",
"gce_targetpool.go",

View File

@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"constants.go",
"context.go",
"doc.go",
"gce_projects.go",
"gen.go",

View File

@ -0,0 +1,31 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"context"
"time"
)
const (
defaultCallTimeout = 1 * time.Hour
)
// ContextWithCallTimeout returns a context with a default timeout, used for generated client calls.
func ContextWithCallTimeout() (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), defaultCallTimeout)
}

View File

@ -48,8 +48,7 @@ type Cloud interface {
RegionBackendServices() RegionBackendServices
AlphaRegionBackendServices() AlphaRegionBackendServices
Disks() Disks
AlphaDisks() AlphaDisks
AlphaRegionDisks() AlphaRegionDisks
BetaRegionDisks() BetaRegionDisks
Firewalls() Firewalls
ForwardingRules() ForwardingRules
AlphaForwardingRules() AlphaForwardingRules
@ -88,8 +87,7 @@ func NewGCE(s *Service) *GCE {
gceRegionBackendServices: &GCERegionBackendServices{s},
gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s},
gceDisks: &GCEDisks{s},
gceAlphaDisks: &GCEAlphaDisks{s},
gceAlphaRegionDisks: &GCEAlphaRegionDisks{s},
gceBetaRegionDisks: &GCEBetaRegionDisks{s},
gceFirewalls: &GCEFirewalls{s},
gceForwardingRules: &GCEForwardingRules{s},
gceAlphaForwardingRules: &GCEAlphaForwardingRules{s},
@ -132,8 +130,7 @@ type GCE struct {
gceRegionBackendServices *GCERegionBackendServices
gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices
gceDisks *GCEDisks
gceAlphaDisks *GCEAlphaDisks
gceAlphaRegionDisks *GCEAlphaRegionDisks
gceBetaRegionDisks *GCEBetaRegionDisks
gceFirewalls *GCEFirewalls
gceForwardingRules *GCEForwardingRules
gceAlphaForwardingRules *GCEAlphaForwardingRules
@ -209,14 +206,9 @@ func (gce *GCE) Disks() Disks {
return gce.gceDisks
}
// AlphaDisks returns the interface for the alpha Disks.
func (gce *GCE) AlphaDisks() AlphaDisks {
return gce.gceAlphaDisks
}
// AlphaRegionDisks returns the interface for the alpha RegionDisks.
func (gce *GCE) AlphaRegionDisks() AlphaRegionDisks {
return gce.gceAlphaRegionDisks
// BetaRegionDisks returns the interface for the beta RegionDisks.
func (gce *GCE) BetaRegionDisks() BetaRegionDisks {
return gce.gceBetaRegionDisks
}
// Firewalls returns the interface for the ga Firewalls.
@ -373,8 +365,7 @@ func NewMockGCE(projectRouter ProjectRouter) *MockGCE {
MockRegionBackendServices: NewMockRegionBackendServices(projectRouter, mockRegionBackendServicesObjs),
MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs),
MockDisks: NewMockDisks(projectRouter, mockDisksObjs),
MockAlphaDisks: NewMockAlphaDisks(projectRouter, mockDisksObjs),
MockAlphaRegionDisks: NewMockAlphaRegionDisks(projectRouter, mockRegionDisksObjs),
MockBetaRegionDisks: NewMockBetaRegionDisks(projectRouter, mockRegionDisksObjs),
MockFirewalls: NewMockFirewalls(projectRouter, mockFirewallsObjs),
MockForwardingRules: NewMockForwardingRules(projectRouter, mockForwardingRulesObjs),
MockAlphaForwardingRules: NewMockAlphaForwardingRules(projectRouter, mockForwardingRulesObjs),
@ -417,8 +408,7 @@ type MockGCE struct {
MockRegionBackendServices *MockRegionBackendServices
MockAlphaRegionBackendServices *MockAlphaRegionBackendServices
MockDisks *MockDisks
MockAlphaDisks *MockAlphaDisks
MockAlphaRegionDisks *MockAlphaRegionDisks
MockBetaRegionDisks *MockBetaRegionDisks
MockFirewalls *MockFirewalls
MockForwardingRules *MockForwardingRules
MockAlphaForwardingRules *MockAlphaForwardingRules
@ -494,14 +484,9 @@ func (mock *MockGCE) Disks() Disks {
return mock.MockDisks
}
// AlphaDisks returns the interface for the alpha Disks.
func (mock *MockGCE) AlphaDisks() AlphaDisks {
return mock.MockAlphaDisks
}
// AlphaRegionDisks returns the interface for the alpha RegionDisks.
func (mock *MockGCE) AlphaRegionDisks() AlphaRegionDisks {
return mock.MockAlphaRegionDisks
// BetaRegionDisks returns the interface for the beta RegionDisks.
func (mock *MockGCE) BetaRegionDisks() BetaRegionDisks {
return mock.MockBetaRegionDisks
}
// Firewalls returns the interface for the ga Firewalls.
@ -718,19 +703,6 @@ type MockDisksObj struct {
Obj interface{}
}
// ToAlpha retrieves the given version of the object.
func (m *MockDisksObj) ToAlpha() *alpha.Disk {
if ret, ok := m.Obj.(*alpha.Disk); ok {
return ret
}
// Convert the object via JSON copying to the type that was requested.
ret := &alpha.Disk{}
if err := copyViaJSON(ret, m.Obj); err != nil {
glog.Errorf("Could not convert %T to *alpha.Disk via JSON: %v", m.Obj, err)
}
return ret
}
// ToGA retrieves the given version of the object.
func (m *MockDisksObj) ToGA() *ga.Disk {
if ret, ok := m.Obj.(*ga.Disk); ok {
@ -1056,15 +1028,15 @@ type MockRegionDisksObj struct {
Obj interface{}
}
// ToAlpha retrieves the given version of the object.
func (m *MockRegionDisksObj) ToAlpha() *alpha.Disk {
if ret, ok := m.Obj.(*alpha.Disk); ok {
// ToBeta retrieves the given version of the object.
func (m *MockRegionDisksObj) ToBeta() *beta.Disk {
if ret, ok := m.Obj.(*beta.Disk); ok {
return ret
}
// Convert the object via JSON copying to the type that was requested.
ret := &alpha.Disk{}
ret := &beta.Disk{}
if err := copyViaJSON(ret, m.Obj); err != nil {
glog.Errorf("Could not convert %T to *alpha.Disk via JSON: %v", m.Obj, err)
glog.Errorf("Could not convert %T to *beta.Disk via JSON: %v", m.Obj, err)
}
return ret
}
@ -4668,6 +4640,7 @@ type Disks interface {
List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error)
Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error
Delete(ctx context.Context, key *meta.Key) error
Resize(context.Context, *meta.Key, *ga.DisksResizeRequest) error
}
// NewMockDisks returns a new mock for Disks.
@ -4707,6 +4680,7 @@ type MockDisks struct {
ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockDisks) (bool, []*ga.Disk, error)
InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockDisks) (bool, error)
DeleteHook func(ctx context.Context, key *meta.Key, m *MockDisks) (bool, error)
ResizeHook func(context.Context, *meta.Key, *ga.DisksResizeRequest, *MockDisks) error
// X is extra state that can be used as part of the mock. Generated code
// will not use this field.
@ -4855,6 +4829,14 @@ func (m *MockDisks) Obj(o *ga.Disk) *MockDisksObj {
return &MockDisksObj{o}
}
// Resize is a mock for the corresponding method. If ResizeHook is set, the
// call is fully delegated to it (the mock itself is passed along so the hook
// can reach its state); otherwise Resize is a no-op that reports success.
// NOTE(review): generated code (gen.go per PR description) — do not hand-edit.
func (m *MockDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error {
if m.ResizeHook != nil {
return m.ResizeHook(ctx, key, arg0, m)
}
return nil
}
// GCEDisks is a simplifying adapter for the GCE Disks.
type GCEDisks struct {
s *Service
@ -4996,17 +4978,51 @@ func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error {
return err
}
// AlphaDisks is an interface that allows for mocking of Disks.
type AlphaDisks interface {
Get(ctx context.Context, key *meta.Key) (*alpha.Disk, error)
List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error)
Insert(ctx context.Context, key *meta.Key, obj *alpha.Disk) error
Delete(ctx context.Context, key *meta.Key) error
// Resize is a method on GCEDisks. It issues a GA Disks.Resize request for the
// zonal disk identified by key and blocks until the resulting GCE operation
// completes. Returns a rate-limiter, API-call, or operation-wait error.
// NOTE(review): generated code (gen.go per PR description) — do not hand-edit.
func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error {
glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key)
// Reject malformed keys before consuming a rate-limiter token.
if !key.Valid() {
glog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks")
// Rate-limit key identifying this (project, service, operation, version)
// tuple for the client-side rate limiter.
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "Resize",
Version: meta.Version("ga"),
Service: "Disks",
}
glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
return err
}
call := g.s.GA.Disks.Resize(projectID, key.Zone, key.Name, arg0)
call.Context(ctx)
op, err := call.Do()
if err != nil {
glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err)
return err
}
// Resize is asynchronous on the GCE side; wait for the operation to finish.
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err)
return err
}
// NewMockAlphaDisks returns a new mock for Disks.
func NewMockAlphaDisks(pr ProjectRouter, objs map[meta.Key]*MockDisksObj) *MockAlphaDisks {
mock := &MockAlphaDisks{
// BetaRegionDisks is an interface that allows for mocking of RegionDisks.
// Implemented by GCEBetaRegionDisks (real API adapter) and MockBetaRegionDisks.
// NOTE(review): generated code (gen.go per PR description) — do not hand-edit.
type BetaRegionDisks interface {
// Get retrieves the regional Disk identified by key.
Get(ctx context.Context, key *meta.Key) (*beta.Disk, error)
// List returns all Disks in the given region matching the filter fl.
List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error)
// Insert creates the Disk obj under key.
Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error
// Delete removes the Disk identified by key.
Delete(ctx context.Context, key *meta.Key) error
// Resize changes the size of the Disk identified by key.
Resize(context.Context, *meta.Key, *beta.RegionDisksResizeRequest) error
}
// NewMockBetaRegionDisks returns a new mock for RegionDisks.
func NewMockBetaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockBetaRegionDisks {
mock := &MockBetaRegionDisks{
ProjectRouter: pr,
Objects: objs,
@ -5017,342 +5033,8 @@ func NewMockAlphaDisks(pr ProjectRouter, objs map[meta.Key]*MockDisksObj) *MockA
return mock
}
// MockAlphaDisks is the mock for Disks.
// NOTE(review): generated code (gen.go per PR description) — do not hand-edit.
type MockAlphaDisks struct {
// Lock guards the mock's mutable state (Objects and the error maps).
Lock sync.Mutex
ProjectRouter ProjectRouter
// Objects maintained by the mock.
Objects map[meta.Key]*MockDisksObj
// If an entry exists for the given key and operation, then the error
// will be returned instead of the operation.
GetError map[meta.Key]error
ListError *error
InsertError map[meta.Key]error
DeleteError map[meta.Key]error
// xxxHook allow you to intercept the standard processing of the mock in
// order to add your own logic. Return (true, _, _) to prevent the normal
// execution flow of the mock. Return (false, nil, nil) to continue with
// normal mock behavior after the hook function executes.
GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaDisks) (bool, *alpha.Disk, error)
ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockAlphaDisks) (bool, []*alpha.Disk, error)
InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Disk, m *MockAlphaDisks) (bool, error)
DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaDisks) (bool, error)
// X is extra state that can be used as part of the mock. Generated code
// will not use this field.
X interface{}
}
// Get returns the object from the mock. Resolution order: GetHook intercept,
// key validation, injected GetError, stored object (converted to alpha),
// otherwise a googleapi 404.
func (m *MockAlphaDisks) Get(ctx context.Context, key *meta.Key) (*alpha.Disk, error) {
if m.GetHook != nil {
if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
return obj, err
}
}
if !key.Valid() {
return nil, fmt.Errorf("invalid GCE key (%+v)", key)
}
m.Lock.Lock()
defer m.Lock.Unlock()
// Injected per-key error takes precedence over any stored object.
if err, ok := m.GetError[*key]; ok {
glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err
}
if obj, ok := m.Objects[*key]; ok {
typedObj := obj.ToAlpha()
glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
return typedObj, nil
}
// Mirror the real API: missing objects surface as a googleapi 404 error.
err := &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("MockAlphaDisks %v not found", key),
}
glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err
}
// List all of the objects in the mock in the given zone. ListHook may
// intercept; otherwise returns stored objects whose key matches zone and
// which pass the filter fl, or the injected ListError if set.
func (m *MockAlphaDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error) {
if m.ListHook != nil {
if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept {
glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
return objs, err
}
}
m.Lock.Lock()
defer m.Lock.Unlock()
if m.ListError != nil {
err := *m.ListError
glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)
return nil, *m.ListError
}
var objs []*alpha.Disk
for key, obj := range m.Objects {
// Zonal resource: only keys in the requested zone are considered.
if key.Zone != zone {
continue
}
if !fl.Match(obj.ToAlpha()) {
continue
}
objs = append(objs, obj.ToAlpha())
}
glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
return objs, nil
}
// Insert is a mock for inserting/creating a new object. InsertHook may
// intercept; a duplicate key yields a googleapi 409. On success the object's
// Name and SelfLink are populated from the key before storing.
func (m *MockAlphaDisks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Disk) error {
if m.InsertHook != nil {
if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
}
if !key.Valid() {
return fmt.Errorf("invalid GCE key (%+v)", key)
}
m.Lock.Lock()
defer m.Lock.Unlock()
if err, ok := m.InsertError[*key]; ok {
glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
// Mirror the real API: inserting over an existing object is a 409 conflict.
if _, ok := m.Objects[*key]; ok {
err := &googleapi.Error{
Code: http.StatusConflict,
Message: fmt.Sprintf("MockAlphaDisks %v exists", key),
}
glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
obj.Name = key.Name
projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "disks")
obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "disks", key)
m.Objects[*key] = &MockDisksObj{obj}
glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
return nil
}
// Delete is a mock for deleting the object. DeleteHook may intercept;
// deleting a missing key yields a googleapi 404.
func (m *MockAlphaDisks) Delete(ctx context.Context, key *meta.Key) error {
if m.DeleteHook != nil {
if intercept, err := m.DeleteHook(ctx, key, m); intercept {
glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
}
if !key.Valid() {
return fmt.Errorf("invalid GCE key (%+v)", key)
}
m.Lock.Lock()
defer m.Lock.Unlock()
if err, ok := m.DeleteError[*key]; ok {
glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
// Mirror the real API: deleting a nonexistent object is a 404.
if _, ok := m.Objects[*key]; !ok {
err := &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("MockAlphaDisks %v not found", key),
}
glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
delete(m.Objects, *key)
glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = nil", ctx, key)
return nil
}
// Obj wraps the object for use in the mock, i.e. as a value in the
// MockAlphaDisks.Objects map.
func (m *MockAlphaDisks) Obj(o *alpha.Disk) *MockDisksObj {
return &MockDisksObj{o}
}
// GCEAlphaDisks is a simplifying adapter for the GCE Disks.
// NOTE(review): generated code (gen.go per PR description) — do not hand-edit.
type GCEAlphaDisks struct {
// s provides the underlying API clients, project router, and rate limiter.
s *Service
}
// Get the Disk named by key. Validates the key, passes the client-side rate
// limiter, then issues an alpha Disks.Get call.
func (g *GCEAlphaDisks) Get(ctx context.Context, key *meta.Key) (*alpha.Disk, error) {
glog.V(5).Infof("GCEAlphaDisks.Get(%v, %v): called", ctx, key)
if !key.Valid() {
glog.V(2).Infof("GCEAlphaDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
return nil, fmt.Errorf("invalid GCE key (%#v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "Get",
Version: meta.Version("alpha"),
Service: "Disks",
}
glog.V(5).Infof("GCEAlphaDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEAlphaDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
return nil, err
}
call := g.s.Alpha.Disks.Get(projectID, key.Zone, key.Name)
call.Context(ctx)
v, err := call.Do()
glog.V(4).Infof("GCEAlphaDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
return v, err
}
// List all Disk objects. Pages through the alpha Disks.List results for the
// given zone, applying the filter fl server-side (unless fl is filter.None),
// and accumulates all items into one slice.
func (g *GCEAlphaDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error) {
glog.V(5).Infof("GCEAlphaDisks.List(%v, %v, %v) called", ctx, zone, fl)
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "List",
Version: meta.Version("alpha"),
Service: "Disks",
}
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
return nil, err
}
glog.V(5).Infof("GCEAlphaDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
call := g.s.Alpha.Disks.List(projectID, zone)
if fl != filter.None {
call.Filter(fl.String())
}
var all []*alpha.Disk
// f collects each page's items; Pages drives it until exhaustion or error.
f := func(l *alpha.DiskList) error {
glog.V(5).Infof("GCEAlphaDisks.List(%v, ..., %v): page %+v", ctx, fl, l)
all = append(all, l.Items...)
return nil
}
if err := call.Pages(ctx, f); err != nil {
glog.V(4).Infof("GCEAlphaDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
return nil, err
}
// Verbosity-dependent logging: item count at V(4), full dump at V(5).
if glog.V(4) {
glog.V(4).Infof("GCEAlphaDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
} else if glog.V(5) {
var asStr []string
for _, o := range all {
asStr = append(asStr, fmt.Sprintf("%+v", o))
}
glog.V(5).Infof("GCEAlphaDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
}
return all, nil
}
// Insert Disk with key of value obj. Sets obj.Name from the key, issues an
// alpha Disks.Insert call, and blocks until the resulting operation completes.
func (g *GCEAlphaDisks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Disk) error {
glog.V(5).Infof("GCEAlphaDisks.Insert(%v, %v, %+v): called", ctx, key, obj)
if !key.Valid() {
glog.V(2).Infof("GCEAlphaDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "Insert",
Version: meta.Version("alpha"),
Service: "Disks",
}
glog.V(5).Infof("GCEAlphaDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEAlphaDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
return err
}
// The key, not the caller-supplied object, is authoritative for the name.
obj.Name = key.Name
call := g.s.Alpha.Disks.Insert(projectID, key.Zone, obj)
call.Context(ctx)
op, err := call.Do()
if err != nil {
glog.V(4).Infof("GCEAlphaDisks.Insert(%v, %v, ...) = %+v", ctx, key, err)
return err
}
// Insert is asynchronous on the GCE side; wait for the operation to finish.
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEAlphaDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
return err
}
// Delete the Disk referenced by key. Issues an alpha Disks.Delete call and
// blocks until the resulting operation completes.
func (g *GCEAlphaDisks) Delete(ctx context.Context, key *meta.Key) error {
glog.V(5).Infof("GCEAlphaDisks.Delete(%v, %v): called", ctx, key)
if !key.Valid() {
glog.V(2).Infof("GCEAlphaDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "Delete",
Version: meta.Version("alpha"),
Service: "Disks",
}
glog.V(5).Infof("GCEAlphaDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEAlphaDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
return err
}
call := g.s.Alpha.Disks.Delete(projectID, key.Zone, key.Name)
call.Context(ctx)
op, err := call.Do()
if err != nil {
glog.V(4).Infof("GCEAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
// Delete is asynchronous on the GCE side; wait for the operation to finish.
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
// AlphaRegionDisks is an interface that allows for mocking of RegionDisks.
// NOTE(review): generated code (gen.go per PR description) — do not hand-edit.
type AlphaRegionDisks interface {
// Get retrieves the regional Disk identified by key.
Get(ctx context.Context, key *meta.Key) (*alpha.Disk, error)
// List returns all Disks in the given region matching the filter fl.
List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error)
// Insert creates the Disk obj under key.
Insert(ctx context.Context, key *meta.Key, obj *alpha.Disk) error
// Delete removes the Disk identified by key.
Delete(ctx context.Context, key *meta.Key) error
}
// NewMockAlphaRegionDisks returns a new mock for RegionDisks. The objs map is
// shared with the caller (and, per NewMockGCE above, across API versions of
// the same resource); error maps start empty so no calls fail by default.
func NewMockAlphaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockAlphaRegionDisks {
mock := &MockAlphaRegionDisks{
ProjectRouter: pr,
Objects: objs,
GetError: map[meta.Key]error{},
InsertError: map[meta.Key]error{},
DeleteError: map[meta.Key]error{},
}
return mock
}
// MockAlphaRegionDisks is the mock for RegionDisks.
type MockAlphaRegionDisks struct {
// MockBetaRegionDisks is the mock for RegionDisks.
type MockBetaRegionDisks struct {
Lock sync.Mutex
ProjectRouter ProjectRouter
@ -5371,10 +5053,11 @@ type MockAlphaRegionDisks struct {
// order to add your own logic. Return (true, _, _) to prevent the normal
// execution flow of the mock. Return (false, nil, nil) to continue with
// normal mock behavior/ after the hook function executes.
GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionDisks) (bool, *alpha.Disk, error)
ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionDisks) (bool, []*alpha.Disk, error)
InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Disk, m *MockAlphaRegionDisks) (bool, error)
DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionDisks) (bool, error)
GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, *beta.Disk, error)
ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionDisks) (bool, []*beta.Disk, error)
InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Disk, m *MockBetaRegionDisks) (bool, error)
DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, error)
ResizeHook func(context.Context, *meta.Key, *beta.RegionDisksResizeRequest, *MockBetaRegionDisks) error
// X is extra state that can be used as part of the mock. Generated code
// will not use this field.
@ -5382,10 +5065,10 @@ type MockAlphaRegionDisks struct {
}
// Get returns the object from the mock.
func (m *MockAlphaRegionDisks) Get(ctx context.Context, key *meta.Key) (*alpha.Disk, error) {
func (m *MockBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) {
if m.GetHook != nil {
if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
return obj, err
}
}
@ -5397,28 +5080,28 @@ func (m *MockAlphaRegionDisks) Get(ctx context.Context, key *meta.Key) (*alpha.D
defer m.Lock.Unlock()
if err, ok := m.GetError[*key]; ok {
glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err)
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err
}
if obj, ok := m.Objects[*key]; ok {
typedObj := obj.ToAlpha()
glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
typedObj := obj.ToBeta()
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
return typedObj, nil
}
err := &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("MockAlphaRegionDisks %v not found", key),
Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key),
}
glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err)
glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err
}
// List all of the objects in the mock in the given region.
func (m *MockAlphaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) {
func (m *MockBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) {
if m.ListHook != nil {
if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept {
glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
return objs, err
}
}
@ -5428,31 +5111,31 @@ func (m *MockAlphaRegionDisks) List(ctx context.Context, region string, fl *filt
if m.ListError != nil {
err := *m.ListError
glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
return nil, *m.ListError
}
var objs []*alpha.Disk
var objs []*beta.Disk
for key, obj := range m.Objects {
if key.Region != region {
continue
}
if !fl.Match(obj.ToAlpha()) {
if !fl.Match(obj.ToBeta()) {
continue
}
objs = append(objs, obj.ToAlpha())
objs = append(objs, obj.ToBeta())
}
glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
return objs, nil
}
// Insert is a mock for inserting/creating a new object.
func (m *MockAlphaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Disk) error {
func (m *MockBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error {
if m.InsertHook != nil {
if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
}
@ -5464,32 +5147,32 @@ func (m *MockAlphaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *a
defer m.Lock.Unlock()
if err, ok := m.InsertError[*key]; ok {
glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
if _, ok := m.Objects[*key]; ok {
err := &googleapi.Error{
Code: http.StatusConflict,
Message: fmt.Sprintf("MockAlphaRegionDisks %v exists", key),
Message: fmt.Sprintf("MockBetaRegionDisks %v exists", key),
}
glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
obj.Name = key.Name
projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "disks")
obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "disks", key)
projectID := m.ProjectRouter.ProjectID(ctx, "beta", "disks")
obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "disks", key)
m.Objects[*key] = &MockRegionDisksObj{obj}
glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
return nil
}
// Delete is a mock for deleting the object.
func (m *MockAlphaRegionDisks) Delete(ctx context.Context, key *meta.Key) error {
func (m *MockBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error {
if m.DeleteHook != nil {
if intercept, err := m.DeleteHook(ctx, key, m); intercept {
glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
}
@ -5501,166 +5184,207 @@ func (m *MockAlphaRegionDisks) Delete(ctx context.Context, key *meta.Key) error
defer m.Lock.Unlock()
if err, ok := m.DeleteError[*key]; ok {
glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
if _, ok := m.Objects[*key]; !ok {
err := &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("MockAlphaRegionDisks %v not found", key),
Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key),
}
glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
delete(m.Objects, *key)
glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = nil", ctx, key)
glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = nil", ctx, key)
return nil
}
// Obj wraps the object for use in the mock.
func (m *MockAlphaRegionDisks) Obj(o *alpha.Disk) *MockRegionDisksObj {
func (m *MockBetaRegionDisks) Obj(o *beta.Disk) *MockRegionDisksObj {
return &MockRegionDisksObj{o}
}
// GCEAlphaRegionDisks is a simplifying adapter for the GCE RegionDisks.
type GCEAlphaRegionDisks struct {
// Resize is a mock for the corresponding method.
func (m *MockBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error {
if m.ResizeHook != nil {
return m.ResizeHook(ctx, key, arg0, m)
}
return nil
}
// GCEBetaRegionDisks is a simplifying adapter for the GCE RegionDisks.
type GCEBetaRegionDisks struct {
s *Service
}
// Get the Disk named by key.
func (g *GCEAlphaRegionDisks) Get(ctx context.Context, key *meta.Key) (*alpha.Disk, error) {
glog.V(5).Infof("GCEAlphaRegionDisks.Get(%v, %v): called", ctx, key)
func (g *GCEBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) {
glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): called", ctx, key)
if !key.Valid() {
glog.V(2).Infof("GCEAlphaRegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
glog.V(2).Infof("GCEBetaRegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
return nil, fmt.Errorf("invalid GCE key (%#v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks")
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "Get",
Version: meta.Version("alpha"),
Version: meta.Version("beta"),
Service: "RegionDisks",
}
glog.V(5).Infof("GCEAlphaRegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEAlphaRegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
return nil, err
}
call := g.s.Alpha.RegionDisks.Get(projectID, key.Region, key.Name)
call := g.s.Beta.RegionDisks.Get(projectID, key.Region, key.Name)
call.Context(ctx)
v, err := call.Do()
glog.V(4).Infof("GCEAlphaRegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
return v, err
}
// List all Disk objects.
func (g *GCEAlphaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) {
glog.V(5).Infof("GCEAlphaRegionDisks.List(%v, %v, %v) called", ctx, region, fl)
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks")
func (g *GCEBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) {
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v) called", ctx, region, fl)
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "List",
Version: meta.Version("alpha"),
Version: meta.Version("beta"),
Service: "RegionDisks",
}
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
return nil, err
}
glog.V(5).Infof("GCEAlphaRegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
call := g.s.Alpha.RegionDisks.List(projectID, region)
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
call := g.s.Beta.RegionDisks.List(projectID, region)
if fl != filter.None {
call.Filter(fl.String())
}
var all []*alpha.Disk
f := func(l *alpha.DiskList) error {
glog.V(5).Infof("GCEAlphaRegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l)
var all []*beta.Disk
f := func(l *beta.DiskList) error {
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l)
all = append(all, l.Items...)
return nil
}
if err := call.Pages(ctx, f); err != nil {
glog.V(4).Infof("GCEAlphaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
return nil, err
}
if glog.V(4) {
glog.V(4).Infof("GCEAlphaRegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
} else if glog.V(5) {
var asStr []string
for _, o := range all {
asStr = append(asStr, fmt.Sprintf("%+v", o))
}
glog.V(5).Infof("GCEAlphaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
}
return all, nil
}
// Insert Disk with key of value obj.
func (g *GCEAlphaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Disk) error {
glog.V(5).Infof("GCEAlphaRegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj)
func (g *GCEBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error {
glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj)
if !key.Valid() {
glog.V(2).Infof("GCEAlphaRegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
glog.V(2).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks")
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "Insert",
Version: meta.Version("alpha"),
Version: meta.Version("beta"),
Service: "RegionDisks",
}
glog.V(5).Infof("GCEAlphaRegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEAlphaRegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
return err
}
obj.Name = key.Name
call := g.s.Alpha.RegionDisks.Insert(projectID, key.Region, obj)
call := g.s.Beta.RegionDisks.Insert(projectID, key.Region, obj)
call.Context(ctx)
op, err := call.Do()
if err != nil {
glog.V(4).Infof("GCEAlphaRegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err)
glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err)
return err
}
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEAlphaRegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
return err
}
// Delete the Disk referenced by key.
func (g *GCEAlphaRegionDisks) Delete(ctx context.Context, key *meta.Key) error {
glog.V(5).Infof("GCEAlphaRegionDisks.Delete(%v, %v): called", ctx, key)
func (g *GCEBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error {
glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): called", ctx, key)
if !key.Valid() {
glog.V(2).Infof("GCEAlphaRegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
glog.V(2).Infof("GCEBetaRegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks")
projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks")
rk := &RateLimitKey{
ProjectID: projectID,
Operation: "Delete",
Version: meta.Version("alpha"),
Version: meta.Version("beta"),
Service: "RegionDisks",
}
glog.V(5).Infof("GCEAlphaRegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("GCEAlphaRegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
return err
}
call := g.s.Alpha.RegionDisks.Delete(projectID, key.Region, key.Name)
call := g.s.Beta.RegionDisks.Delete(projectID, key.Region, key.Name)
call.Context(ctx)
op, err := call.Do()
if err != nil {
glog.V(4).Infof("GCEAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("GCEAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err)
return err
}
// Resize resizes the regional Disk identified by key to the size requested
// in arg0, and blocks until the resulting GCE operation completes (or the
// context is cancelled). Returns an error if the key is invalid, the rate
// limiter rejects the call, the API call fails, or the operation fails.
// NOTE(review): this function appears in a generated file (gen.go per the
// PR description); comments here may be overwritten on regeneration.
func (g *GCEBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error {
	glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): called", ctx, key)
	// Reject malformed keys before issuing any API traffic.
	if !key.Valid() {
		glog.V(2).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	// Resolve which GCP project this beta RegionDisks call targets.
	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Resize",
		Version: meta.Version("beta"),
		Service: "RegionDisks",
	}
	glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	// Client-side rate limiting happens before the API call is made.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
		return err
	}
	call := g.s.Beta.RegionDisks.Resize(projectID, key.Region, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err)
		return err
	}
	// The API returns an asynchronous operation; wait for it to finish so
	// callers observe synchronous resize semantics.
	err = g.s.WaitForCompletion(ctx, op)
	glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err)
	return err
}

View File

@ -312,28 +312,17 @@ func TestDisksGroup(t *testing.T) {
mock := NewMockGCE(pr)
var key *meta.Key
keyAlpha := meta.ZonalKey("key-alpha", "location")
key = keyAlpha
keyGA := meta.ZonalKey("key-ga", "location")
key = keyGA
// Ignore unused variables.
_, _, _ = ctx, mock, key
// Get not found.
if _, err := mock.AlphaDisks().Get(ctx, key); err == nil {
t.Errorf("AlphaDisks().Get(%v, %v) = _, nil; want error", ctx, key)
}
if _, err := mock.Disks().Get(ctx, key); err == nil {
t.Errorf("Disks().Get(%v, %v) = _, nil; want error", ctx, key)
}
// Insert.
{
obj := &alpha.Disk{}
if err := mock.AlphaDisks().Insert(ctx, keyAlpha, obj); err != nil {
t.Errorf("AlphaDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
}
}
{
obj := &ga.Disk{}
if err := mock.Disks().Insert(ctx, keyGA, obj); err != nil {
@ -342,35 +331,16 @@ func TestDisksGroup(t *testing.T) {
}
// Get across versions.
if obj, err := mock.AlphaDisks().Get(ctx, key); err != nil {
t.Errorf("AlphaDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
if obj, err := mock.Disks().Get(ctx, key); err != nil {
t.Errorf("Disks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
// List.
mock.MockAlphaDisks.Objects[*keyAlpha] = mock.MockAlphaDisks.Obj(&alpha.Disk{Name: keyAlpha.Name})
mock.MockDisks.Objects[*keyGA] = mock.MockDisks.Obj(&ga.Disk{Name: keyGA.Name})
want := map[string]bool{
"key-alpha": true,
"key-ga": true,
"key-ga": true,
}
_ = want // ignore unused variables.
{
objs, err := mock.AlphaDisks().List(ctx, location, filter.None)
if err != nil {
t.Errorf("AlphaDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
} else {
got := map[string]bool{}
for _, obj := range objs {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want)
}
}
}
{
objs, err := mock.Disks().List(ctx, location, filter.None)
if err != nil {
@ -387,17 +357,11 @@ func TestDisksGroup(t *testing.T) {
}
// Delete across versions.
if err := mock.AlphaDisks().Delete(ctx, keyAlpha); err != nil {
t.Errorf("AlphaDisks().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
}
if err := mock.Disks().Delete(ctx, keyGA); err != nil {
t.Errorf("Disks().Delete(%v, %v) = %v; want nil", ctx, keyGA, err)
}
// Delete not found.
if err := mock.AlphaDisks().Delete(ctx, keyAlpha); err == nil {
t.Errorf("AlphaDisks().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
}
if err := mock.Disks().Delete(ctx, keyGA); err == nil {
t.Errorf("Disks().Delete(%v, %v) = nil; want error", ctx, keyGA)
}
@ -1315,39 +1279,39 @@ func TestRegionDisksGroup(t *testing.T) {
mock := NewMockGCE(pr)
var key *meta.Key
keyAlpha := meta.RegionalKey("key-alpha", "location")
key = keyAlpha
keyBeta := meta.RegionalKey("key-beta", "location")
key = keyBeta
// Ignore unused variables.
_, _, _ = ctx, mock, key
// Get not found.
if _, err := mock.AlphaRegionDisks().Get(ctx, key); err == nil {
t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key)
if _, err := mock.BetaRegionDisks().Get(ctx, key); err == nil {
t.Errorf("BetaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key)
}
// Insert.
{
obj := &alpha.Disk{}
if err := mock.AlphaRegionDisks().Insert(ctx, keyAlpha, obj); err != nil {
t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
obj := &beta.Disk{}
if err := mock.BetaRegionDisks().Insert(ctx, keyBeta, obj); err != nil {
t.Errorf("BetaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
}
}
// Get across versions.
if obj, err := mock.AlphaRegionDisks().Get(ctx, key); err != nil {
t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
if obj, err := mock.BetaRegionDisks().Get(ctx, key); err != nil {
t.Errorf("BetaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
// List.
mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name})
mock.MockBetaRegionDisks.Objects[*keyBeta] = mock.MockBetaRegionDisks.Obj(&beta.Disk{Name: keyBeta.Name})
want := map[string]bool{
"key-alpha": true,
"key-beta": true,
}
_ = want // ignore unused variables.
{
objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None)
objs, err := mock.BetaRegionDisks().List(ctx, location, filter.None)
if err != nil {
t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
t.Errorf("BetaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
} else {
got := map[string]bool{}
for _, obj := range objs {
@ -1360,13 +1324,13 @@ func TestRegionDisksGroup(t *testing.T) {
}
// Delete across versions.
if err := mock.AlphaRegionDisks().Delete(ctx, keyAlpha); err != nil {
t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err != nil {
t.Errorf("BetaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
}
// Delete not found.
if err := mock.AlphaRegionDisks().Delete(ctx, keyAlpha); err == nil {
t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err == nil {
t.Errorf("BetaRegionDisks().Delete(%v, %v) = nil; want error", ctx, keyBeta)
}
}

View File

@ -160,22 +160,20 @@ var AllServices = []*ServiceInfo{
Resource: "disks",
keyType: Zonal,
serviceType: reflect.TypeOf(&ga.DisksService{}),
},
{
Object: "Disk",
Service: "Disks",
Resource: "disks",
version: VersionAlpha,
keyType: Zonal,
serviceType: reflect.TypeOf(&alpha.DisksService{}),
additionalMethods: []string{
"Resize",
},
},
{
Object: "Disk",
Service: "RegionDisks",
Resource: "disks",
version: VersionAlpha,
version: VersionBeta,
keyType: Regional,
serviceType: reflect.TypeOf(&alpha.DisksService{}),
serviceType: reflect.TypeOf(&beta.RegionDisksService{}),
additionalMethods: []string{
"Resize",
},
},
{
Object: "Firewall",

View File

@ -38,6 +38,8 @@ import (
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
"k8s.io/kubernetes/pkg/features"
)
@ -65,7 +67,7 @@ type diskServiceManager interface {
sizeGb int64,
tagsStr string,
diskType string,
zone string) (gceObject, error)
zone string) error
// Creates a new regional persistent disk on GCE with the given disk spec.
CreateRegionalDiskOnCloudProvider(
@ -73,41 +75,35 @@ type diskServiceManager interface {
sizeGb int64,
tagsStr string,
diskType string,
zones sets.String) (gceObject, error)
zones sets.String) error
// Deletes the persistent disk from GCE with the given diskName.
DeleteDiskOnCloudProvider(zone string, disk string) (gceObject, error)
DeleteDiskOnCloudProvider(zone string, disk string) error
// Deletes the regional persistent disk from GCE with the given diskName.
DeleteRegionalDiskOnCloudProvider(diskName string) (gceObject, error)
DeleteRegionalDiskOnCloudProvider(diskName string) error
// Attach a persistent disk on GCE with the given disk spec to the specified instance.
AttachDiskOnCloudProvider(
disk *GCEDisk,
readWrite string,
instanceZone string,
instanceName string) (gceObject, error)
instanceName string) error
// Detach a persistent disk on GCE with the given disk spec from the specified instance.
DetachDiskOnCloudProvider(
instanceZone string,
instanceName string,
devicePath string) (gceObject, error)
devicePath string) error
ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error)
RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) (gceObject, error)
ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error
RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error
// Gets the persistent disk from GCE with the given diskName.
GetDiskFromCloudProvider(zone string, diskName string) (*GCEDisk, error)
// Gets the regional persistent disk from GCE with the given diskName.
GetRegionalDiskFromCloudProvider(diskName string) (*GCEDisk, error)
// Waits until GCE reports the given operation in the given zone as done.
WaitForZoneOp(op gceObject, zone string, mc *metricContext) error
// Waits until GCE reports the given operation in the given region is done.
WaitForRegionalOp(op gceObject, mc *metricContext) error
}
type gceServiceManager struct {
@ -121,11 +117,11 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
sizeGb int64,
tagsStr string,
diskType string,
zone string) (gceObject, error) {
zone string) error {
diskTypeURI, err := manager.getDiskTypeURI(
manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useBetaAPI */)
if err != nil {
return nil, err
return err
}
diskToCreateV1 := &compute.Disk{
@ -134,8 +130,10 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
Description: tagsStr,
Type: diskTypeURI,
}
return manager.gce.service.Disks.Insert(
manager.gce.projectID, zone, diskToCreateV1).Do()
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.Disks().Insert(ctx, meta.ZonalKey(name, zone), diskToCreateV1)
}
func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
@ -143,42 +141,44 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
sizeGb int64,
tagsStr string,
diskType string,
replicaZones sets.String) (gceObject, error) {
replicaZones sets.String) error {
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
diskTypeURI, err := manager.getDiskTypeURI(
manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useBetaAPI */)
if err != nil {
return nil, err
}
fullyQualifiedReplicaZones := []string{}
for _, replicaZone := range replicaZones.UnsortedList() {
fullyQualifiedReplicaZones = append(
fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
}
diskToCreateBeta := &computebeta.Disk{
Name: name,
SizeGb: sizeGb,
Description: tagsStr,
Type: diskTypeURI,
ReplicaZones: fullyQualifiedReplicaZones,
}
return manager.gce.serviceBeta.RegionDisks.Insert(
manager.gce.projectID, manager.gce.region, diskToCreateBeta).Do()
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
diskTypeURI, err := manager.getDiskTypeURI(
manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useBetaAPI */)
if err != nil {
return err
}
fullyQualifiedReplicaZones := []string{}
for _, replicaZone := range replicaZones.UnsortedList() {
fullyQualifiedReplicaZones = append(
fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
}
diskToCreateBeta := &computebeta.Disk{
Name: name,
SizeGb: sizeGb,
Description: tagsStr,
Type: diskTypeURI,
ReplicaZones: fullyQualifiedReplicaZones,
}
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.BetaRegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreateBeta)
}
func (manager *gceServiceManager) AttachDiskOnCloudProvider(
disk *GCEDisk,
readWrite string,
instanceZone string,
instanceName string) (gceObject, error) {
instanceName string) error {
source, err := manager.getDiskSourceURI(disk)
if err != nil {
return nil, err
return err
}
attachedDiskV1 := &compute.AttachedDisk{
@ -188,16 +188,19 @@ func (manager *gceServiceManager) AttachDiskOnCloudProvider(
Source: source,
Type: diskTypePersistent,
}
return manager.gce.service.Instances.AttachDisk(
manager.gce.projectID, instanceZone, instanceName, attachedDiskV1).Do()
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.Instances().AttachDisk(ctx, meta.ZonalKey(instanceName, instanceZone), attachedDiskV1)
}
func (manager *gceServiceManager) DetachDiskOnCloudProvider(
instanceZone string,
instanceName string,
devicePath string) (gceObject, error) {
return manager.gce.service.Instances.DetachDisk(
manager.gce.projectID, instanceZone, instanceName, devicePath).Do()
devicePath string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.Instances().DetachDisk(ctx, meta.ZonalKey(instanceName, instanceZone), devicePath)
}
func (manager *gceServiceManager) GetDiskFromCloudProvider(
@ -211,8 +214,9 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). But disk name is empty.", zone)
}
diskStable, err := manager.gce.service.Disks.Get(
manager.gce.projectID, zone, diskName).Do()
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
diskStable, err := manager.gce.c.Disks().Get(ctx, meta.ZonalKey(diskName, zone))
if err != nil {
return nil, err
}
@ -240,56 +244,50 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
diskName string) (*GCEDisk, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
diskBeta, err := manager.gce.serviceBeta.RegionDisks.Get(
manager.gce.projectID, manager.gce.region, diskName).Do()
if err != nil {
return nil, err
}
zones := sets.NewString()
for _, zoneURI := range diskBeta.ReplicaZones {
zones.Insert(lastComponent(zoneURI))
}
return &GCEDisk{
ZoneInfo: multiZone{zones},
Region: lastComponent(diskBeta.Region),
Name: diskBeta.Name,
Kind: diskBeta.Kind,
Type: diskBeta.Type,
SizeGb: diskBeta.SizeGb,
}, nil
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
diskBeta, err := manager.gce.c.BetaRegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region))
if err != nil {
return nil, err
}
zones := sets.NewString()
for _, zoneURI := range diskBeta.ReplicaZones {
zones.Insert(lastComponent(zoneURI))
}
return &GCEDisk{
ZoneInfo: multiZone{zones},
Region: lastComponent(diskBeta.Region),
Name: diskBeta.Name,
Kind: diskBeta.Kind,
Type: diskBeta.Type,
SizeGb: diskBeta.SizeGb,
}, nil
}
func (manager *gceServiceManager) DeleteDiskOnCloudProvider(
zone string,
diskName string) (gceObject, error) {
return manager.gce.service.Disks.Delete(
manager.gce.projectID, zone, diskName).Do()
diskName string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.Disks().Delete(ctx, meta.ZonalKey(diskName, zone))
}
func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider(
diskName string) (gceObject, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return manager.gce.serviceBeta.RegionDisks.Delete(
manager.gce.projectID, manager.gce.region, diskName).Do()
diskName string) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}
func (manager *gceServiceManager) WaitForZoneOp(
op gceObject, zone string, mc *metricContext) error {
return manager.gce.waitForZoneOp(op, zone, mc)
}
func (manager *gceServiceManager) WaitForRegionalOp(
op gceObject, mc *metricContext) error {
return manager.gce.waitForRegionOp(op, manager.gce.region, mc)
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.BetaRegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region))
}
func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) {
@ -411,21 +409,28 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string,
return region, nil
}
func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error) {
func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error {
resizeServiceRequest := &compute.DisksResizeRequest{
SizeGb: sizeGb,
}
return manager.gce.service.Disks.Resize(manager.gce.projectID, zone, disk.Name, resizeServiceRequest).Do()
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.Disks().Resize(ctx, meta.ZonalKey(disk.Name, zone), resizeServiceRequest)
}
func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) (gceObject, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
resizeServiceRequest := &computebeta.RegionDisksResizeRequest{
SizeGb: sizeGb,
}
return manager.gce.serviceBeta.RegionDisks.Resize(manager.gce.projectID, disk.Region, disk.Name, resizeServiceRequest).Do()
func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
resizeServiceRequest := &computebeta.RegionDisksResizeRequest{
SizeGb: sizeGb,
}
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.BetaRegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest)
}
// Disks is interface for manipulation with GCE PDs.
@ -551,14 +556,7 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn
readWrite = "READ_ONLY"
}
attachOp, err := gce.manager.AttachDiskOnCloudProvider(
disk, readWrite, instance.Zone, instance.Name)
if err != nil {
return mc.Observe(err)
}
return gce.manager.WaitForZoneOp(attachOp, instance.Zone, mc)
return mc.Observe(gce.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name))
}
func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) error {
@ -578,12 +576,7 @@ func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) erro
}
mc := newDiskMetricContextZonal("detach", gce.region, inst.Zone)
detachOp, err := gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath)
if err != nil {
return mc.Observe(err)
}
return gce.manager.WaitForZoneOp(detachOp, inst.Zone, mc)
return mc.Observe(gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath))
}
func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
@ -671,17 +664,10 @@ func (gce *GCECloud) CreateDisk(
mc := newDiskMetricContextZonal("create", gce.region, zone)
createOp, err := gce.manager.CreateDiskOnCloudProvider(
err = gce.manager.CreateDiskOnCloudProvider(
name, sizeGb, tagsStr, diskType, zone)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
return nil
} else if err != nil {
return mc.Observe(err)
}
err = gce.manager.WaitForZoneOp(createOp, zone, mc)
mc.Observe(err)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
return nil
@ -719,17 +705,10 @@ func (gce *GCECloud) CreateRegionalDisk(
mc := newDiskMetricContextRegional("create", gce.region)
createOp, err := gce.manager.CreateRegionalDiskOnCloudProvider(
err = gce.manager.CreateRegionalDiskOnCloudProvider(
name, sizeGb, tagsStr, diskType, replicaZones)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
return nil
} else if err != nil {
return mc.Observe(err)
}
err = gce.manager.WaitForRegionalOp(createOp, mc)
mc.Observe(err)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
return nil
@ -782,31 +761,26 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
switch zoneInfo := disk.ZoneInfo.(type) {
case singleZone:
mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone)
resizeOp, err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone)
err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone)
if err != nil {
return oldSize, mc.Observe(err)
} else {
return newSizeQuant, mc.Observe(err)
}
waitErr := gce.manager.WaitForZoneOp(resizeOp, zoneInfo.zone, mc)
if waitErr != nil {
return oldSize, waitErr
}
return newSizeQuant, nil
case multiZone:
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
mc = newDiskMetricContextRegional("resize", disk.Region)
resizeOp, err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)
if err != nil {
return oldSize, mc.Observe(err)
}
waitErr := gce.manager.WaitForRegionalOp(resizeOp, mc)
if waitErr != nil {
return oldSize, waitErr
}
return newSizeQuant, nil
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
}
mc = newDiskMetricContextRegional("resize", disk.Region)
err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)
if err != nil {
return oldSize, mc.Observe(err)
} else {
return newSizeQuant, mc.Observe(err)
}
return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
case nil:
return oldSize, fmt.Errorf("PD has nil ZoneInfo: %v", disk)
default:
@ -1022,21 +996,14 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
switch zoneInfo := disk.ZoneInfo.(type) {
case singleZone:
mc = newDiskMetricContextZonal("delete", disk.Region, zoneInfo.zone)
deleteOp, err := gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name)
if err != nil {
return mc.Observe(err)
}
return gce.manager.WaitForZoneOp(deleteOp, zoneInfo.zone, mc)
return mc.Observe(gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name))
case multiZone:
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
mc = newDiskMetricContextRegional("delete", disk.Region)
deleteOp, err := gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)
if err != nil {
return mc.Observe(err)
}
return gce.manager.WaitForRegionalOp(deleteOp, mc)
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
}
return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
mc = newDiskMetricContextRegional("delete", disk.Region)
return mc.Observe(gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name))
case nil:
return fmt.Errorf("PD has nil ZoneInfo: %v", disk)
default:

View File

@ -70,9 +70,6 @@ func TestCreateDisk_Basic(t *testing.T) {
if !fakeManager.createDiskCalled {
t.Error("Never called GCE disk create.")
}
if !fakeManager.doesOpMatch {
t.Error("Ops used in WaitForZoneOp does not match what's returned by CreateDisk.")
}
// Partial check of equality between disk description sent to GCE and parameters of method.
diskToCreate := fakeManager.diskToCreateStable
@ -127,9 +124,6 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
if !fakeManager.createDiskCalled {
t.Error("Never called GCE disk create.")
}
if !fakeManager.doesOpMatch {
t.Error("Ops used in WaitForZoneOp does not match what's returned by CreateDisk.")
}
// Partial check of equality between disk description sent to GCE and parameters of method.
diskToCreate := fakeManager.diskToCreateStable
@ -165,7 +159,7 @@ func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
// Inject disk AlreadyExists error.
alreadyExistsError := googleapi.ErrorItem{Reason: "alreadyExists"}
fakeManager.waitForOpError = &googleapi.Error{
fakeManager.opError = &googleapi.Error{
Errors: []googleapi.ErrorItem{alreadyExistsError},
}
@ -314,9 +308,6 @@ func TestDeleteDisk_Basic(t *testing.T) {
if !fakeManager.deleteDiskCalled {
t.Error("Never called GCE disk delete.")
}
if !fakeManager.doesOpMatch {
t.Error("Ops used in WaitForZoneOp does not match what's returned by DeleteDisk.")
}
}
@ -644,16 +635,12 @@ const (
type FakeServiceManager struct {
// Common fields shared among tests
targetAPI targetClientAPI
gceProjectID string
gceRegion string
opAlpha *computealpha.Operation // Mocks an operation returned by GCE API calls
opBeta *computebeta.Operation // Mocks an operation returned by GCE API calls
opStable *compute.Operation // Mocks an operation returned by GCE API calls
doesOpMatch bool
zonalDisks map[string]string // zone: diskName
regionalDisks map[string]sets.String // diskName: zones
waitForOpError error // Error to be returned by WaitForZoneOp or WaitForRegionalOp
targetAPI targetClientAPI
gceProjectID string
gceRegion string
zonalDisks map[string]string // zone: diskName
regionalDisks map[string]sets.String // diskName: zones
opError error
// Fields for TestCreateDisk
createDiskCalled bool
@ -684,12 +671,11 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
sizeGb int64,
tagsStr string,
diskType string,
zone string) (gceObject, error) {
zone string) error {
manager.createDiskCalled = true
switch t := manager.targetAPI; t {
case targetStable:
manager.opStable = &compute.Operation{}
diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
diskToCreateV1 := &compute.Disk{
Name: name,
@ -699,9 +685,8 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
}
manager.diskToCreateStable = diskToCreateV1
manager.zonalDisks[zone] = diskToCreateV1.Name
return manager.opStable, nil
return nil
case targetBeta:
manager.opBeta = &computebeta.Operation{}
diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
diskToCreateBeta := &computebeta.Disk{
Name: name,
@ -711,9 +696,8 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
}
manager.diskToCreateBeta = diskToCreateBeta
manager.zonalDisks[zone] = diskToCreateBeta.Name
return manager.opBeta, nil
return nil
case targetAlpha:
manager.opAlpha = &computealpha.Operation{}
diskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
diskToCreateAlpha := &computealpha.Disk{
Name: name,
@ -723,9 +707,9 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
}
manager.diskToCreateAlpha = diskToCreateAlpha
manager.zonalDisks[zone] = diskToCreateAlpha.Name
return manager.opAlpha, nil
return nil
default:
return nil, fmt.Errorf("unexpected type: %T", t)
return fmt.Errorf("unexpected type: %T", t)
}
}
@ -738,13 +722,12 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
sizeGb int64,
tagsStr string,
diskType string,
zones sets.String) (gceObject, error) {
zones sets.String) error {
manager.createDiskCalled = true
diskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
switch t := manager.targetAPI; t {
case targetStable:
manager.opStable = &compute.Operation{}
diskToCreateV1 := &compute.Disk{
Name: name,
SizeGb: sizeGb,
@ -753,13 +736,13 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
}
manager.diskToCreateStable = diskToCreateV1
manager.regionalDisks[diskToCreateV1.Name] = zones
return manager.opStable, nil
return nil
case targetBeta:
return nil, fmt.Errorf("RegionalDisk CreateDisk op not supported in beta.")
return fmt.Errorf("RegionalDisk CreateDisk op not supported in beta.")
case targetAlpha:
return nil, fmt.Errorf("RegionalDisk CreateDisk op not supported in alpha.")
return fmt.Errorf("RegionalDisk CreateDisk op not supported in alpha.")
default:
return nil, fmt.Errorf("unexpected type: %T", t)
return fmt.Errorf("unexpected type: %T", t)
}
}
@ -767,39 +750,33 @@ func (manager *FakeServiceManager) AttachDiskOnCloudProvider(
disk *GCEDisk,
readWrite string,
instanceZone string,
instanceName string) (gceObject, error) {
instanceName string) error {
switch t := manager.targetAPI; t {
case targetStable:
manager.opStable = &compute.Operation{}
return manager.opStable, nil
return nil
case targetBeta:
manager.opBeta = &computebeta.Operation{}
return manager.opBeta, nil
return nil
case targetAlpha:
manager.opAlpha = &computealpha.Operation{}
return manager.opAlpha, nil
return nil
default:
return nil, fmt.Errorf("unexpected type: %T", t)
return fmt.Errorf("unexpected type: %T", t)
}
}
func (manager *FakeServiceManager) DetachDiskOnCloudProvider(
instanceZone string,
instanceName string,
devicePath string) (gceObject, error) {
devicePath string) error {
switch t := manager.targetAPI; t {
case targetStable:
manager.opStable = &compute.Operation{}
return manager.opStable, nil
return nil
case targetBeta:
manager.opBeta = &computebeta.Operation{}
return manager.opBeta, nil
return nil
case targetAlpha:
manager.opAlpha = &computealpha.Operation{}
return manager.opAlpha, nil
return nil
default:
return nil, fmt.Errorf("unexpected type: %T", t)
return fmt.Errorf("unexpected type: %T", t)
}
}
@ -856,13 +833,13 @@ func (manager *FakeServiceManager) GetRegionalDiskFromCloudProvider(
func (manager *FakeServiceManager) ResizeDiskOnCloudProvider(
disk *GCEDisk,
size int64,
zone string) (gceObject, error) {
zone string) error {
panic("Not implmented")
}
func (manager *FakeServiceManager) RegionalResizeDiskOnCloudProvider(
disk *GCEDisk,
size int64) (gceObject, error) {
size int64) error {
panic("Not implemented")
}
@ -871,91 +848,41 @@ func (manager *FakeServiceManager) RegionalResizeDiskOnCloudProvider(
*/
func (manager *FakeServiceManager) DeleteDiskOnCloudProvider(
zone string,
disk string) (gceObject, error) {
disk string) error {
manager.deleteDiskCalled = true
delete(manager.zonalDisks, zone)
switch t := manager.targetAPI; t {
case targetStable:
manager.opStable = &compute.Operation{}
return manager.opStable, nil
return nil
case targetBeta:
manager.opBeta = &computebeta.Operation{}
return manager.opBeta, nil
return nil
case targetAlpha:
manager.opAlpha = &computealpha.Operation{}
return manager.opAlpha, nil
return nil
default:
return nil, fmt.Errorf("unexpected type: %T", t)
return fmt.Errorf("unexpected type: %T", t)
}
}
func (manager *FakeServiceManager) DeleteRegionalDiskOnCloudProvider(
disk string) (gceObject, error) {
disk string) error {
manager.deleteDiskCalled = true
delete(manager.regionalDisks, disk)
switch t := manager.targetAPI; t {
case targetStable:
manager.opStable = &compute.Operation{}
return manager.opStable, nil
return nil
case targetBeta:
manager.opBeta = &computebeta.Operation{}
return manager.opBeta, nil
return nil
case targetAlpha:
manager.opAlpha = &computealpha.Operation{}
return manager.opAlpha, nil
return nil
default:
return nil, fmt.Errorf("unexpected type: %T", t)
return fmt.Errorf("unexpected type: %T", t)
}
}
func (manager *FakeServiceManager) WaitForZoneOp(
op gceObject,
zone string,
mc *metricContext) error {
switch v := op.(type) {
case *computealpha.Operation:
if op.(*computealpha.Operation) == manager.opAlpha {
manager.doesOpMatch = true
}
case *computebeta.Operation:
if op.(*computebeta.Operation) == manager.opBeta {
manager.doesOpMatch = true
}
case *compute.Operation:
if op.(*compute.Operation) == manager.opStable {
manager.doesOpMatch = true
}
default:
return fmt.Errorf("unexpected type: %T", v)
}
return manager.waitForOpError
}
func (manager *FakeServiceManager) WaitForRegionalOp(
op gceObject, mc *metricContext) error {
switch v := op.(type) {
case *computealpha.Operation:
if op.(*computealpha.Operation) == manager.opAlpha {
manager.doesOpMatch = true
}
case *computebeta.Operation:
if op.(*computebeta.Operation) == manager.opBeta {
manager.doesOpMatch = true
}
case *compute.Operation:
if op.(*compute.Operation) == manager.opStable {
manager.doesOpMatch = true
}
default:
return fmt.Errorf("unexpected type: %T", v)
}
return manager.waitForOpError
}
func createNodeZones(zones []string) map[string]sets.String {
nodeZones := map[string]sets.String{}
for _, zone := range zones {

View File

@ -1,180 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce
import (
"encoding/json"
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/golang/glog"
computealpha "google.golang.org/api/compute/v0.alpha"
computebeta "google.golang.org/api/compute/v0.beta"
computev1 "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
// waitForOp polls a GCE v1 Operation until it reports status "DONE" or the
// poll times out. getOperation fetches the latest state of the operation by
// name; mc records the observed error on every poll so metrics reflect the
// operation's final outcome.
//
// Returns nil on successful completion; otherwise the operation's recorded
// error (via getErrorFromOp) or the wait.Poll timeout error.
func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operationName string) (*computev1.Operation, error), mc *metricContext) error {
	if op == nil {
		return mc.Observe(fmt.Errorf("operation must not be nil"))
	}

	// Fast path: the operation may already be finished when handed to us.
	if opIsDone(op) {
		return getErrorFromOp(op)
	}

	opStart := time.Now()
	opName := op.Name

	return wait.Poll(operationPollInterval, operationPollTimeoutDuration, func() (bool, error) {
		start := time.Now()
		gce.operationPollRateLimiter.Accept()
		duration := time.Since(start)
		if duration > 5*time.Second {
			// The shared rate limiter stalled this poll; surface it for debugging.
			glog.V(2).Infof("pollOperation: throttled %v for %v", duration, opName)
		}

		pollOp, err := getOperation(opName)
		if err != nil {
			// Polling failure is logged but not fatal; the next tick retries.
			glog.Warningf("GCE poll operation %s failed: pollOp: [%v] err: [%v] getErrorFromOp: [%v]",
				opName, pollOp, err, getErrorFromOp(pollOp))
		}

		done := opIsDone(pollOp)
		if done {
			duration := time.Since(opStart)
			if duration > 1*time.Minute {
				// Log the JSON. It's cleaner than the %v structure.
				enc, err := pollOp.MarshalJSON()
				if err != nil {
					glog.Warningf("waitForOperation: long operation (%v): %v (failed to encode to JSON: %v)",
						duration, pollOp, err)
				} else {
					glog.V(2).Infof("waitForOperation: long operation (%v): %v",
						duration, string(enc))
				}
			}
		}

		return done, mc.Observe(getErrorFromOp(pollOp))
	})
}
// opIsDone reports whether the given operation has reached terminal
// "DONE" status. A nil operation is never done.
func opIsDone(op *computev1.Operation) bool {
	if op == nil {
		return false
	}
	return op.Status == "DONE"
}
// getErrorFromOp extracts the first error recorded on an operation and
// wraps it as a *googleapi.Error carrying the operation's HTTP status code.
// Returns nil when the operation is nil or carries no errors.
func getErrorFromOp(op *computev1.Operation) error {
	if op == nil || op.Error == nil || len(op.Error.Errors) == 0 {
		return nil
	}
	err := &googleapi.Error{
		Code:    int(op.HttpErrorStatusCode),
		Message: op.Error.Errors[0].Message,
	}
	glog.Errorf("GCE operation failed: %v", err)
	return err
}
// waitForGlobalOp waits for a global operation to complete in the cloud's
// default project.
func (gce *GCECloud) waitForGlobalOp(op gceObject, mc *metricContext) error {
	return gce.waitForGlobalOpInProject(op, gce.ProjectID(), mc)
}
// waitForRegionOp waits for a regional operation to complete in the cloud's
// default project.
func (gce *GCECloud) waitForRegionOp(op gceObject, region string, mc *metricContext) error {
	return gce.waitForRegionOpInProject(op, gce.ProjectID(), region, mc)
}
// waitForZoneOp waits for a zonal operation to complete in the cloud's
// default project.
func (gce *GCECloud) waitForZoneOp(op gceObject, zone string, mc *metricContext) error {
	return gce.waitForZoneOpInProject(op, gce.ProjectID(), zone, mc)
}
// waitForGlobalOpInProject waits for the given global operation to complete
// in the specified project. The operation may originate from the alpha,
// beta, or v1 compute API; alpha/beta operations are polled through their
// respective service clients and converted to v1 for the shared wait loop.
// Returns an error for any other operation type.
func (gce *GCECloud) waitForGlobalOpInProject(op gceObject, projectID string, mc *metricContext) error {
	switch v := op.(type) {
	case *computealpha.Operation:
		return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
			op, err := gce.serviceAlpha.GlobalOperations.Get(projectID, operationName).Do()
			return convertToV1Operation(op), err
		}, mc)
	case *computebeta.Operation:
		return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
			op, err := gce.serviceBeta.GlobalOperations.Get(projectID, operationName).Do()
			return convertToV1Operation(op), err
		}, mc)
	case *computev1.Operation:
		// v1 operations need no conversion; poll them directly.
		return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
			return gce.service.GlobalOperations.Get(projectID, operationName).Do()
		}, mc)
	default:
		return fmt.Errorf("unexpected type: %T", v)
	}
}
// waitForRegionOpInProject waits for the given regional operation to
// complete in the specified project and region. Alpha/beta operations are
// polled through their respective service clients and converted to v1 for
// the shared wait loop. Returns an error for any other operation type.
func (gce *GCECloud) waitForRegionOpInProject(op gceObject, projectID, region string, mc *metricContext) error {
	switch v := op.(type) {
	case *computealpha.Operation:
		return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
			op, err := gce.serviceAlpha.RegionOperations.Get(projectID, region, operationName).Do()
			return convertToV1Operation(op), err
		}, mc)
	case *computebeta.Operation:
		return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
			op, err := gce.serviceBeta.RegionOperations.Get(projectID, region, operationName).Do()
			return convertToV1Operation(op), err
		}, mc)
	case *computev1.Operation:
		// v1 operations need no conversion; poll them directly.
		return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
			return gce.service.RegionOperations.Get(projectID, region, operationName).Do()
		}, mc)
	default:
		return fmt.Errorf("unexpected type: %T", v)
	}
}
// waitForZoneOpInProject waits for the given zonal operation to complete in
// the specified project and zone. Alpha/beta operations are polled through
// their respective service clients and converted to v1 for the shared wait
// loop. Returns an error for any other operation type.
func (gce *GCECloud) waitForZoneOpInProject(op gceObject, projectID, zone string, mc *metricContext) error {
	switch v := op.(type) {
	case *computealpha.Operation:
		return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
			op, err := gce.serviceAlpha.ZoneOperations.Get(projectID, zone, operationName).Do()
			return convertToV1Operation(op), err
		}, mc)
	case *computebeta.Operation:
		return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
			op, err := gce.serviceBeta.ZoneOperations.Get(projectID, zone, operationName).Do()
			return convertToV1Operation(op), err
		}, mc)
	case *computev1.Operation:
		// v1 operations need no conversion; poll them directly.
		return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
			return gce.service.ZoneOperations.Get(projectID, zone, operationName).Do()
		}, mc)
	default:
		return fmt.Errorf("unexpected type: %T", v)
	}
}
// convertToV1Operation re-encodes an alpha/beta/v1 GCE API object into a v1
// Operation by round-tripping through its JSON form. Panics on a marshal or
// unmarshal failure, which indicates a programmer error rather than a
// recoverable runtime condition.
func convertToV1Operation(object gceObject) *computev1.Operation {
	raw, err := object.MarshalJSON()
	if err != nil {
		panic(fmt.Sprintf("Failed to encode to json: %v", err))
	}
	converted := new(computev1.Operation)
	if err := json.Unmarshal(raw, converted); err != nil {
		panic(fmt.Sprintf("Failed to convert GCE apiObject %v to v1 operation: %v", object, err))
	}
	return converted
}

View File

@ -18,16 +18,12 @@ package gce
import (
"context"
"encoding/json"
"reflect"
"strings"
"testing"
"golang.org/x/oauth2/google"
computealpha "google.golang.org/api/compute/v0.alpha"
computebeta "google.golang.org/api/compute/v0.beta"
computev1 "google.golang.org/api/compute/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
)
@ -480,56 +476,6 @@ func TestGenerateCloudConfigs(t *testing.T) {
}
}
// TestConvertToV1Operation verifies that converting alpha and beta compute
// Operations to v1 via convertToV1Operation preserves all field values,
// using a round-trip through JSON from a reference v1 operation.
func TestConvertToV1Operation(t *testing.T) {
	v1Op := getTestOperation()
	enc, _ := v1Op.MarshalJSON()
	var op interface{}
	var alphaOp computealpha.Operation
	var betaOp computebeta.Operation

	// Build alpha/beta operations with identical contents to the v1 one.
	if err := json.Unmarshal(enc, &alphaOp); err != nil {
		t.Errorf("Failed to unmarshal operation: %v", err)
	}

	if err := json.Unmarshal(enc, &betaOp); err != nil {
		t.Errorf("Failed to unmarshal operation: %v", err)
	}

	// Alpha -> v1 must be lossless.
	op = convertToV1Operation(&alphaOp)
	if _, ok := op.(*computev1.Operation); ok {
		if !reflect.DeepEqual(op, v1Op) {
			t.Errorf("Failed to maintain consistency across conversion")
		}
	} else {
		t.Errorf("Expect output to be type v1 operation, but got %v", op)
	}

	// Beta -> v1 must be lossless.
	op = convertToV1Operation(&betaOp)
	if _, ok := op.(*computev1.Operation); ok {
		if !reflect.DeepEqual(op, v1Op) {
			t.Errorf("Failed to maintain consistency across conversion")
		}
	} else {
		t.Errorf("Expect output to be type v1 operation, but got %v", op)
	}
}
// getTestOperation returns a fixed v1 Operation fixture, populated with a
// name, description, numeric ID, and a single recorded error, for use in
// conversion tests.
func getTestOperation() *computev1.Operation {
	return &computev1.Operation{
		Name:        "test",
		Description: "test",
		Id:          uint64(12345),
		Error: &computev1.OperationError{
			Errors: []*computev1.OperationErrorErrors{
				{
					Code:    "555",
					Message: "error",
				},
			},
		},
	}
}
func TestNewAlphaFeatureGate(t *testing.T) {
testCases := []struct {
alphaFeatures []string