Add Azure managed disk support

pull/6/head
Khaled Henidak & Andy Zhang 2017-07-13 19:55:32 +08:00 committed by Dong Liu
parent 74f1943774
commit 677e593d86
30 changed files with 4308 additions and 1206 deletions
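In short: with this change a Kubernetes data disk on Azure can be attached either as a blob-based (unmanaged) VHD identified by its blob URI, or as a managed disk identified by its ARM resource ID. A minimal sketch of the two compute.DataDisk shapes the provider now builds (the URI and resource ID below are hypothetical placeholders, not values from this commit):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
)

func main() {
	lun := int32(0)
	name := "pvc-disk-0"

	// Blob-based (unmanaged) disk: identified by the VHD blob URI.
	vhdURI := "https://account.blob.core.windows.net/vhds/pvc-disk-0.vhd" // hypothetical URI
	unmanaged := compute.DataDisk{
		Name:         &name,
		Vhd:          &compute.VirtualHardDisk{URI: &vhdURI},
		Lun:          &lun,
		CreateOption: "attach",
	}

	// Managed disk: identified by its ARM resource ID instead of a blob URI.
	managedID := "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/disks/pvc-disk-0" // hypothetical ID
	managed := compute.DataDisk{
		Name:         &name,
		ManagedDisk:  &compute.ManagedDiskParameters{ID: &managedID},
		Lun:          &lun,
		CreateOption: "attach",
	}

	fmt.Println(*unmanaged.Vhd.URI, *managed.ManagedDisk.ID)
}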

Godeps/Godeps.json (generated)

@ -38,6 +38,11 @@
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk",
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network",
"Comment": "v10.0.4-beta-1-g786cc84",

Godeps/LICENSES (generated)

@ -8952,6 +8952,216 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/Azure/azure-sdk-for-go/arm/disk licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/Azure/azure-sdk-for-go/LICENSE cce6fd055830ca30ff78fdf077e870d6 -
================================================================================
================================================================================
= vendor/github.com/Azure/azure-sdk-for-go/arm/network licensed under: =


@ -24,3 +24,4 @@ valid_flag
retry_time
file_content_in_loop
break_on_expected_content
Premium_LRS


@ -13,18 +13,19 @@ go_library(
srcs = [
"azure.go",
"azure_backoff.go",
"azure_blob.go",
"azure_blobDiskController.go",
"azure_controllerCommon.go",
"azure_file.go",
"azure_instance_metadata.go",
"azure_instances.go",
"azure_loadbalancer.go",
"azure_managedDiskController.go",
"azure_routes.go",
"azure_storage.go",
"azure_storageaccount.go",
"azure_util.go",
"azure_wrap.go",
"azure_zones.go",
"vhd.go",
],
tags = ["automanaged"],
deps = [
@ -34,6 +35,7 @@ go_library(
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",


@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/version"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/disk"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/go-autorest/autorest"
@ -122,8 +123,13 @@ type Cloud struct {
SecurityGroupsClient network.SecurityGroupsClient
VirtualMachinesClient compute.VirtualMachinesClient
StorageAccountClient storage.AccountsClient
DisksClient disk.DisksClient
operationPollRateLimiter flowcontrol.RateLimiter
resourceRequestBackoff wait.Backoff
*BlobDiskController
*ManagedDiskController
*controllerCommon
}
func init() {
@ -255,6 +261,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
configureUserAgent(&az.StorageAccountClient.Client)
az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
configureUserAgent(&az.DisksClient.Client)
// Conditionally configure rate limits
if az.CloudProviderRateLimit {
@ -304,6 +315,9 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
az.CloudProviderBackoffJitter)
}
if err := initDiskControllers(&az); err != nil {
return nil, err
}
return &az, nil
}
@ -353,3 +367,42 @@ func configureUserAgent(client *autorest.Client) {
k8sVersion := version.Get().GitVersion
client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion)
}
func initDiskControllers(az *Cloud) error {
// Common controller contains the function
// needed by both blob disk and managed disk controllers
common := &controllerCommon{
aadResourceEndPoint: az.Environment.ServiceManagementEndpoint,
clientID: az.AADClientID,
clientSecret: az.AADClientSecret,
location: az.Location,
storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
managementEndpoint: az.Environment.ResourceManagerEndpoint,
resourceGroup: az.ResourceGroup,
tenantID: az.TenantID,
tokenEndPoint: az.Environment.ActiveDirectoryEndpoint,
subscriptionID: az.SubscriptionID,
cloud: az,
}
// BlobDiskController: contains the function needed to
// create/attach/detach/delete blob based (unmanaged disks)
blobController, err := newBlobDiskController(common)
if err != nil {
return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error())
}
// ManagedDiskController: contains the functions needed to
// create/attach/detach/delete managed disks
managedController, err := newManagedDiskController(common)
if err != nil {
return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error())
}
az.BlobDiskController = blobController
az.ManagedDiskController = managedController
az.controllerCommon = common
return nil
}
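Because the three controllers are embedded by pointer in the Cloud struct, their exported methods (CreateBlobDisk, CreateManagedDisk, AttachDisk, and so on) are promoted onto the cloud provider object itself. A minimal sketch of that embedding pattern, using simplified stand-in types rather than the real controllers:

package main

import "fmt"

// Cut-down stand-ins for the embedded controllers above; the types and
// methods here are illustrative only, not the real implementations.
type blobDiskController struct{}

func (b *blobDiskController) CreateBlobDisk(name string) string { return "vhd:" + name }

type managedDiskController struct{}

func (m *managedDiskController) CreateManagedDisk(name string) string { return "md:" + name }

type cloud struct {
	*blobDiskController
	*managedDiskController
}

func main() {
	c := &cloud{&blobDiskController{}, &managedDiskController{}}
	// Both controllers' methods are promoted and callable directly on the
	// cloud value, which is how callers reach them on *Cloud.
	fmt.Println(c.CreateBlobDisk("pvc-1"), c.CreateManagedDisk("pvc-2"))
}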


@ -1,126 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"regexp"
"strings"
"bytes"
azs "github.com/Azure/azure-sdk-for-go/storage"
)
const (
vhdContainerName = "vhds"
useHTTPS = true
blobServiceName = "blob"
)
// create page blob
func (az *Cloud) createVhdBlob(accountName, accountKey, name string, sizeGB int64, tags map[string]string) (string, string, error) {
blobClient, err := az.getBlobClient(accountName, accountKey)
if err != nil {
return "", "", err
}
size := 1024 * 1024 * 1024 * sizeGB
vhdSize := size + vhdHeaderSize /* header size */
// Blob name in URL must end with '.vhd' extension.
name = name + ".vhd"
cnt := blobClient.GetContainerReference(vhdContainerName)
b := cnt.GetBlobReference(name)
b.Properties.ContentLength = vhdSize
b.Metadata = tags
err = b.PutPageBlob(nil)
if err != nil {
// if container doesn't exist, create one and retry PutPageBlob
detail := err.Error()
if strings.Contains(detail, errContainerNotFound) {
err = cnt.Create(&azs.CreateContainerOptions{Access: azs.ContainerAccessTypePrivate})
if err == nil {
b := cnt.GetBlobReference(name)
b.Properties.ContentLength = vhdSize
b.Metadata = tags
err = b.PutPageBlob(nil)
}
}
}
if err != nil {
return "", "", fmt.Errorf("failed to put page blob: %v", err)
}
// add VHD signature to the blob
h, err := createVHDHeader(uint64(size))
if err != nil {
az.deleteVhdBlob(accountName, accountKey, name)
return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
}
blobRange := azs.BlobRange{
Start: uint64(size),
End: uint64(vhdSize - 1),
}
if err = b.WriteRange(blobRange, bytes.NewBuffer(h[:vhdHeaderSize]), nil); err != nil {
az.deleteVhdBlob(accountName, accountKey, name)
return "", "", fmt.Errorf("failed to update vhd header, err: %v", err)
}
scheme := "http"
if useHTTPS {
scheme = "https"
}
host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, az.Environment.StorageEndpointSuffix)
uri := fmt.Sprintf("%s/%s/%s", host, vhdContainerName, name)
return name, uri, nil
}
// delete a vhd blob
func (az *Cloud) deleteVhdBlob(accountName, accountKey, blobName string) error {
blobClient, err := az.getBlobClient(accountName, accountKey)
if err == nil {
cnt := blobClient.GetContainerReference(vhdContainerName)
b := cnt.GetBlobReference(blobName)
return b.Delete(nil)
}
return err
}
func (az *Cloud) getBlobClient(accountName, accountKey string) (*azs.BlobStorageClient, error) {
client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
if err != nil {
return nil, fmt.Errorf("error creating azure client: %v", err)
}
b := client.GetBlobService()
return &b, nil
}
// get uri https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
func (az *Cloud) getBlobNameAndAccountFromURI(uri string) (string, string, error) {
scheme := "http"
if useHTTPS {
scheme = "https"
}
host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, az.Environment.StorageEndpointSuffix)
reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
re := regexp.MustCompile(reStr)
res := re.FindSubmatch([]byte(uri))
if len(res) < 3 {
return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, uri)
}
return string(res[1]), string(res[2]), nil
}


@ -0,0 +1,808 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"net/url"
"os"
"regexp"
"sync"
"strconv"
"strings"
"sync/atomic"
"time"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"github.com/rubiojr/go-vhd/vhd"
kwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
)
const (
vhdContainerName = "vhds"
useHTTPSForBlobBasedDisk = true
blobServiceName = "blob"
)
type storageAccountState struct {
name string
saType storage.SkuName
key string
diskCount int32
isValidating int32
defaultContainerCreated bool
}
//BlobDiskController : blob disk controller struct
type BlobDiskController struct {
common *controllerCommon
accounts map[string]*storageAccountState
}
var defaultContainerName = ""
var storageAccountNamePrefix = ""
var storageAccountNameMatch = ""
var initFlag int64
var accountsLock = &sync.Mutex{}
func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
c := BlobDiskController{common: common}
err := c.init()
if err != nil {
return nil, err
}
return &c, nil
}
// CreateVolume creates a VHD blob in a given storage account, will create the given storage account if it does not exist in current resource group
func (c *BlobDiskController) CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) {
key, err := c.common.cloud.getStorageAccesskey(storageAccount)
if err != nil {
glog.V(2).Infof("azureDisk - no key found for storage account %s in resource group %s, begin to create a new storage account", storageAccount, c.common.resourceGroup)
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storageAccountType},
Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")},
Location: &location}
cancel := make(chan struct{})
_, errchan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccount, cp, cancel)
err = <-errchan
if err != nil {
return "", "", 0, fmt.Errorf(fmt.Sprintf("Create Storage Account %s, error: %s", storageAccount, err))
}
key, err = c.common.cloud.getStorageAccesskey(storageAccount)
if err != nil {
glog.Errorf("azureDisk - no key found for storage account %s in resource group %s even after creating a new storage account", storageAccount, c.common.resourceGroup)
return "", "", 0, fmt.Errorf("no key found for storage account %s even after creating a new storage account", storageAccount)
}
}
client, err := azstorage.NewBasicClient(storageAccount, key)
if err != nil {
return "", "", 0, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(vhdContainerName)
_, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return "", "", 0, err
}
diskName, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccount, name, vhdContainerName, int64(requestGB))
if err != nil {
return "", "", 0, err
}
glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
return diskName, diskURI, requestGB, err
}
// DeleteVolume deletes a VHD blob
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
if err != nil {
return fmt.Errorf("failed to parse vhd URI %v", err)
}
key, err := c.common.cloud.getStorageAccesskey(accountName)
if err != nil {
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
}
err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
if err != nil {
glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
detail := err.Error()
if strings.Contains(detail, errLeaseIDMissing) {
// disk is still being used
// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI))
}
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
}
glog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
return nil
}
// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) {
scheme := "http"
if useHTTPSForBlobBasedDisk {
scheme = "https"
}
host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix)
reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
re := regexp.MustCompile(reStr)
res := re.FindSubmatch([]byte(diskURI))
if len(res) < 3 {
return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI)
}
return string(res[1]), string(res[2]), nil
}
func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) {
container := blobClient.GetContainerReference(containerName)
_, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return "", "", err
}
size := 1024 * 1024 * 1024 * sizeGB
vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */
// Blob name in URL must end with '.vhd' extension.
vhdName = vhdName + ".vhd"
tags := make(map[string]string)
tags["createdby"] = "k8sAzureDataDisk"
glog.V(4).Infof("azureDisk - creating page blob %name in container %s account %s", vhdName, containerName, accountName)
blob := container.GetBlobReference(vhdName)
blob.Properties.ContentLength = vhdSize
blob.Metadata = tags
err = blob.PutPageBlob(nil)
if err != nil {
return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err)
}
// add VHD signature to the blob
h, err := createVHDHeader(uint64(size))
if err != nil {
blob.DeleteIfExists(nil)
return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
}
blobRange := azstorage.BlobRange{
Start: uint64(size),
End: uint64(vhdSize - 1),
}
if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
vhdName, containerName, accountName, err.Error())
return "", "", err
}
scheme := "http"
if useHTTPSForBlobBasedDisk {
scheme = "https"
}
host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix)
uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName)
return vhdName, uri, nil
}
// delete a vhd blob
func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error {
client, err := azstorage.NewBasicClient(accountName, accountKey)
if err != nil {
return err
}
blobSvc := client.GetBlobService()
container := blobSvc.GetContainerReference(vhdContainerName)
blob := container.GetBlobReference(blobName)
return blob.Delete(nil)
}
//CreateBlobDisk : create a blob-based (unmanaged) disk
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) {
glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s StandAlone:%v", dataDiskName, storageAccountType, forceStandAlone)
var storageAccountName = ""
var err error
if forceStandAlone {
// we have to wait until the storage account is created
storageAccountName = "p" + MakeCRC32(c.common.subscriptionID+c.common.resourceGroup+dataDiskName)
err = c.createStorageAccount(storageAccountName, storageAccountType, c.common.location, false)
if err != nil {
return "", err
}
} else {
storageAccountName, err = c.findSANameForDisk(storageAccountType)
if err != nil {
return "", err
}
}
blobClient, err := c.getBlobSvcClient(storageAccountName)
if err != nil {
return "", err
}
_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB))
if err != nil {
return "", err
}
if !forceStandAlone {
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1)
}
return diskURI, nil
}
//DeleteBlobDisk : delete a blob-based (unmanaged) disk
func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) error {
storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI)
if err != nil {
return err
}
_, ok := c.accounts[storageAccountName]
if !ok {
// the storage account is specified by user
glog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
return c.DeleteVolume(diskURI)
}
// if forced (as in one disk = one storage account)
// delete the account completely
if wasForced {
return c.deleteStorageAccount(storageAccountName)
}
blobSvc, err := c.getBlobSvcClient(storageAccountName)
if err != nil {
return err
}
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName)
container := blobSvc.GetContainerReference(defaultContainerName)
blob := container.GetBlobReference(vhdName)
_, err = blob.DeleteIfExists(nil)
if c.accounts[storageAccountName].diskCount == -1 {
if diskCount, err := c.getDiskCount(storageAccountName); err == nil {
c.accounts[storageAccountName].diskCount = int32(diskCount)
} else {
glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
return nil // we have failed to acquire a new count. not an error condition
}
}
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
return err
}
// init makes a best effort to ensure that 2 accounts (standard/premium) are created,
// to be used by shared blob disks. This speeds up PVC provisioning (in most cases)
func (c *BlobDiskController) init() error {
if !c.shouldInit() {
return nil
}
c.setUniqueStrings()
// get accounts
accounts, err := c.getAllStorageAccounts()
if err != nil {
return err
}
c.accounts = accounts
if len(c.accounts) == 0 {
counter := 1
for counter <= storageAccountsCountInit {
accountType := storage.PremiumLRS
if n := math.Mod(float64(counter), 2); n == 0 {
accountType = storage.StandardLRS
}
// We don't really care if these calls failed
// at this stage, we are trying to ensure 2 accounts (Standard/Premium)
// are there ready for PVC creation
// if we failed here, the accounts will be created in the process
// of creating PVC
// nor do we care if they were partially created, as the entire
// account creation process is idempotent
go func(thisNext int) {
newAccountName := getAccountNameForNum(thisNext)
glog.Infof("azureDisk - BlobDiskController init process will create new storageAccount:%s type:%s", newAccountName, accountType)
err := c.createStorageAccount(newAccountName, accountType, c.common.location, true)
// TODO return created and error from
if err != nil {
glog.Infof("azureDisk - BlobDiskController init: create account %s with error:%s", newAccountName, err.Error())
} else {
glog.Infof("azureDisk - BlobDiskController init: created account %s", newAccountName)
}
}(counter)
counter = counter + 1
}
}
return nil
}
//Sets unique strings to be used as account names and/or blob container names
func (c *BlobDiskController) setUniqueStrings() {
uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID
hash := MakeCRC32(uniqueString)
//used to generate a unique container name used by this cluster's PVCs
defaultContainerName = hash
storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash)
// Used to filter relevant accounts (accounts used by shared PVC)
storageAccountNameMatch = storageAccountNamePrefix
// Used as a template to create new names for relevant accounts
storageAccountNamePrefix = storageAccountNamePrefix + "%s"
}
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
if account, exists := c.accounts[SAName]; exists && account.key != "" {
return c.accounts[SAName].key, nil
}
listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup, SAName)
if err != nil {
return "", err
}
if listKeysResult.Keys == nil {
return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName)
}
for _, v := range *listKeysResult.Keys {
if v.Value != nil && *v.Value == "key1" {
if _, ok := c.accounts[SAName]; !ok {
glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
return *v.Value, nil
}
}
c.accounts[SAName].key = *v.Value
return c.accounts[SAName].key, nil
}
return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName)
}
func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) {
key := ""
var client azstorage.Client
var blobSvc azstorage.BlobStorageClient
var err error
if key, err = c.getStorageAccountKey(SAName); err != nil {
return blobSvc, err
}
if client, err = azstorage.NewBasicClient(SAName, key); err != nil {
return blobSvc, err
}
blobSvc = client.GetBlobService()
return blobSvc, nil
}
func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error {
var err error
var blobSvc azstorage.BlobStorageClient
// short circuit the check via the local cache
// we tolerate the account not being in the cache yet
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
return nil
}
// not cached, check existence and readiness
bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)
// account does not exist
if !bExist {
return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName)
}
// account exists but not ready yet
if provisionState != storage.Succeeded {
// we don't want many concurrent attempts to validate
// account readiness here, hence the lock
counter := 1
for !atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1) {
time.Sleep(3 * time.Second)
counter = counter + 1
// check if we passed the max sleep
if counter >= 20 {
return fmt.Errorf("azureDisk - timeout waiting to acquire lock to validate account:%s readiness", storageAccountName)
}
}
// swapped
defer func() {
c.accounts[storageAccountName].isValidating = 0
}()
// short circuit the check again.
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
return nil
}
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
_, provisionState, err := c.getStorageAccountState(storageAccountName)
if err != nil {
glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
return false, err
}
if provisionState == storage.Succeeded {
return true, nil
}
glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet", storageAccountName)
// leave it for next loop/sync loop
return false, fmt.Errorf("azureDisk - Account %s has not been flagged Succeeded by ARM", storageAccountName)
})
// we have failed to ensure that account is ready for us to create
// the default vhd container
if err != nil {
return err
}
}
if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil {
return err
}
container := blobSvc.GetContainerReference(defaultContainerName)
bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return err
}
if bCreated {
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName)
}
// flag so we no longer have to check on ARM
c.accounts[storageAccountName].defaultContainerCreated = true
return nil
}
// Gets Disk counts per storage account
func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
// if we have it in cache
if c.accounts[SAName].diskCount != -1 {
return int(c.accounts[SAName].diskCount), nil
}
var err error
var blobSvc azstorage.BlobStorageClient
if err = c.ensureDefaultContainer(SAName); err != nil {
return 0, err
}
if blobSvc, err = c.getBlobSvcClient(SAName); err != nil {
return 0, err
}
params := azstorage.ListBlobsParameters{}
container := blobSvc.GetContainerReference(defaultContainerName)
response, err := container.ListBlobs(params)
if err != nil {
return 0, err
}
glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
c.accounts[SAName].diskCount = int32(len(response.Blobs))
return int(c.accounts[SAName].diskCount), nil
}
// shouldInit ensures that we only init the plugin once
// and we only do that in the controller
func (c *BlobDiskController) shouldInit() bool {
if os.Args[0] == "kube-controller-manager" || (os.Args[0] == "/hyperkube" && os.Args[1] == "controller-manager") {
swapped := atomic.CompareAndSwapInt64(&initFlag, 0, 1)
if swapped {
return true
}
}
return false
}
func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
accountListResult, err := c.common.cloud.StorageAccountClient.List()
if err != nil {
return nil, err
}
if accountListResult.Value == nil {
return nil, fmt.Errorf("azureDisk - empty accountListResult")
}
accounts := make(map[string]*storageAccountState)
for _, v := range *accountListResult.Value {
if v.Name == nil || v.Sku == nil {
glog.Infof("azureDisk - accountListResult Name or Sku is nil")
continue
}
if strings.Index(*v.Name, storageAccountNameMatch) != 0 {
continue
}
glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
sastate := &storageAccountState{
name: *v.Name,
saType: (*v.Sku).Name,
diskCount: -1,
}
accounts[*v.Name] = sastate
}
return accounts, nil
}
func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error {
bExist, _, _ := c.getStorageAccountState(storageAccountName)
if bExist {
newAccountState := &storageAccountState{
diskCount: -1,
saType: storageAccountType,
name: storageAccountName,
}
c.addAccountState(storageAccountName, newAccountState)
}
// Account Does not exist
if !bExist {
if len(c.accounts) == maxStorageAccounts && checkMaxAccounts {
return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
}
glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType))
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storageAccountType},
Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")},
Location: &location}
cancel := make(chan struct{})
_, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel)
err := <-errChan
if err != nil {
return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err))
}
newAccountState := &storageAccountState{
diskCount: -1,
saType: storageAccountType,
name: storageAccountName,
}
c.addAccountState(storageAccountName, newAccountState)
}
if !bExist {
// Storage accounts take time to be provisioned;
// if this account was just created, allow some time
// before polling
glog.V(2).Infof("azureDisk - storage account %s was just created, allowing time before polling status", storageAccountName)
time.Sleep(25 * time.Second) // as observed, 25 seconds is the average time for an SA to be provisioned
}
// finally, make sure the default container is created
// before handing the account back over
return c.ensureDefaultContainer(storageAccountName)
}
// finds a new suitable storageAccount for this disk
func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) {
maxDiskCount := maxDisksPerStorageAccounts
SAName := ""
totalDiskCounts := 0
countAccounts := 0 // account of this type.
for _, v := range c.accounts {
// filter out any stand-alone disks/accounts
if strings.Index(v.name, storageAccountNameMatch) != 0 {
continue
}
// note: we compute the average stratified by type.
// this enables growth per SA type, so that low average
// utilization on one account type does not skew all data.
if v.saType == storageAccountType {
// compute average
dCount, err := c.getDiskCount(v.name)
if err != nil {
return "", err
}
totalDiskCounts = totalDiskCounts + dCount
countAccounts = countAccounts + 1
// empty account
if dCount == 0 {
glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
return v.name, nil // shortcircut, avg is good and no need to adjust
}
// if this account is less allocated
if dCount < maxDiskCount {
maxDiskCount = dCount
SAName = v.name
}
}
}
// if we failed to find a storage account
if SAName == "" {
glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
SAName = getAccountNameForNum(c.getNextAccountNum())
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
return "", err
}
return SAName, nil
}
disksAfter := totalDiskCounts + 1 // with the new one!
avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)
// the average is above the threshold and we should create more accounts if we can
if aboveAvg && countAccounts < maxStorageAccounts {
glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
SAName = getAccountNameForNum(c.getNextAccountNum())
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
return "", err
}
return SAName, nil
}
// the average is not OK and we are at capacity (max storage accounts allowed)
if aboveAvg && countAccounts == maxStorageAccounts {
glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
}
// we found a storage account, and either the average is OK or we have reached the max SA count
return SAName, nil
}
func (c *BlobDiskController) getNextAccountNum() int {
max := 0
for k := range c.accounts {
// filter out accounts that are for standalone
if strings.Index(k, storageAccountNameMatch) != 0 {
continue
}
num := getAccountNumFromName(k)
if num > max {
max = num
}
}
return max + 1
}
func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error {
resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName)
if err != nil {
return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err)
}
c.removeAccountState(storageAccountName)
glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName)
return nil
}
//getStorageAccountState returns whether the storage account exists, its provisioning state, and any error
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName)
if err != nil {
return false, "", err
}
return true, account.AccountProperties.ProvisioningState, nil
}
func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) {
accountsLock.Lock()
defer accountsLock.Unlock()
if _, ok := c.accounts[key]; !ok {
c.accounts[key] = state
}
}
func (c *BlobDiskController) removeAccountState(key string) {
accountsLock.Lock()
defer accountsLock.Unlock()
delete(c.accounts, key)
}
// pads account num with zeros as needed
func getAccountNameForNum(num int) string {
sNum := strconv.Itoa(num)
missingZeros := 3 - len(sNum)
strZero := ""
for missingZeros > 0 {
strZero = strZero + "0"
missingZeros = missingZeros - 1
}
sNum = strZero + sNum
return fmt.Sprintf(storageAccountNamePrefix, sNum)
}
func getAccountNumFromName(accountName string) int {
nameLen := len(accountName)
num, _ := strconv.Atoi(accountName[nameLen-3:])
return num
}
func createVHDHeader(size uint64) ([]byte, error) {
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
b := new(bytes.Buffer)
err := binary.Write(b, binary.BigEndian, h)
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
func diskNameandSANameFromURI(diskURI string) (string, string, error) {
uri, err := url.Parse(diskURI)
if err != nil {
return "", "", err
}
hostName := uri.Host
storageAccountName := strings.Split(hostName, ".")[0]
segments := strings.Split(uri.Path, "/")
diskNameVhd := segments[len(segments)-1]
return storageAccountName, diskNameVhd, nil
}
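The growth heuristic in findSANameForDisk can be read as: after adding the new disk, if average utilization across the shared accounts of this SKU would exceed storageAccountUtilizationBeforeGrowing (0.5) and fewer than maxStorageAccounts exist, create another account. A small self-contained sketch of that arithmetic, with made-up disk counts:

package main

import "fmt"

func main() {
	const (
		maxDisksPerStorageAccount = 60  // mirrors maxDisksPerStorageAccounts above
		utilizationBeforeGrowing  = 0.5 // mirrors storageAccountUtilizationBeforeGrowing
	)
	// Hypothetical: two shared accounts of this SKU currently hold 25 and 40 disks.
	diskCounts := []int{25, 40}

	total := 0
	for _, c := range diskCounts {
		total += c
	}
	disksAfter := total + 1 // including the disk about to be created
	avg := float64(disksAfter) / float64(len(diskCounts)*maxDisksPerStorageAccount)

	fmt.Printf("average utilization after create: %.2f\n", avg) // 0.55
	fmt.Println("grow a new account:", avg > utilizationBeforeGrowing)
}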


@ -0,0 +1,270 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"strings"
"time"
"k8s.io/apimachinery/pkg/types"
kwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
)
const (
defaultDataDiskCount int = 16 // which will allow you to work with most medium size VMs (if not found in map)
storageAccountNameTemplate = "pvc%s"
// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
maxStorageAccounts = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
maxDisksPerStorageAccounts = 60
storageAccountUtilizationBeforeGrowing = 0.5
storageAccountsCountInit = 2 // When the plugin is initialized, 2 storage accounts will be created to allow fast PVC create/attach/mount
maxLUN = 64 // max number of LUNs per VM
errLeaseFailed = "AcquireDiskLeaseFailed"
errLeaseIDMissing = "LeaseIdMissing"
errContainerNotFound = "ContainerNotFound"
)
var defaultBackOff = kwait.Backoff{
Steps: 20,
Duration: 2 * time.Second,
Factor: 1.5,
Jitter: 0.0,
}
type controllerCommon struct {
tenantID string
subscriptionID string
location string
storageEndpointSuffix string
resourceGroup string
clientID string
clientSecret string
managementEndpoint string
tokenEndPoint string
aadResourceEndPoint string
aadToken string
expiresOn time.Time
cloud *Cloud
}
// AttachDisk attaches a disk (managed disk or blob-based vhd) to a vm
// the disk must exist; it is identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
if err != nil {
return err
} else if !exists {
return cloudprovider.InstanceNotFound
}
disks := *vm.StorageProfile.DataDisks
if isManagedDisk {
disks = append(disks,
compute.DataDisk{
Name: &diskName,
Lun: &lun,
Caching: cachingMode,
CreateOption: "attach",
ManagedDisk: &compute.ManagedDiskParameters{
ID: &diskURI,
},
})
} else {
disks = append(disks,
compute.DataDisk{
Name: &diskName,
Vhd: &compute.VirtualHardDisk{
URI: &diskURI,
},
Lun: &lun,
Caching: cachingMode,
CreateOption: "attach",
})
}
newVM := compute.VirtualMachine{
Location: vm.Location,
VirtualMachineProperties: &compute.VirtualMachineProperties{
StorageProfile: &compute.StorageProfile{
DataDisks: &disks,
},
},
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName)
c.cloud.operationPollRateLimiter.Accept()
respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
resp := <-respChan
err = <-errChan
if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
}
}
if err != nil {
glog.Errorf("azureDisk - azure attach failed, err: %v", err)
detail := err.Error()
if strings.Contains(detail, errLeaseFailed) {
// if lease cannot be acquired, immediately detach the disk and return the original error
glog.Infof("azureDisk - failed to acquire disk lease, try detach")
c.cloud.DetachDiskByName(diskName, diskURI, nodeName)
}
} else {
glog.V(4).Infof("azureDisk - azure attach succeeded")
}
return err
}
// DetachDiskByName detaches a disk (managed or blob-based) from the host
// the disk can be identified by diskName or diskURI
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
if err != nil || !exists {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
return nil
}
disks := *vm.StorageProfile.DataDisks
bFoundDisk := false
for i, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
bFoundDisk = true
break
}
}
if !bFoundDisk {
return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
}
newVM := compute.VirtualMachine{
Location: vm.Location,
VirtualMachineProperties: &compute.VirtualMachineProperties{
StorageProfile: &compute.StorageProfile{
DataDisks: &disks,
},
},
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName)
c.cloud.operationPollRateLimiter.Accept()
respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
resp := <-respChan
err = <-errChan
if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.cloud.ResourceGroup, vmName)
}
}
if err != nil {
glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
} else {
glog.V(4).Infof("azureDisk - azure disk detach succeeded")
}
return err
}
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
if err != nil {
return -1, err
} else if !exists {
return -1, cloudprovider.InstanceNotFound
}
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
return *disk.Lun, nil
}
}
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}
// GetNextDiskLun searches all disk attachments on the host and finds an unused lun
// returns -1 if all luns are used
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
if err != nil {
return -1, err
} else if !exists {
return -1, cloudprovider.InstanceNotFound
}
used := make([]bool, maxLUN)
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil {
used[*disk.Lun] = true
}
}
for k, v := range used {
if !v {
return int32(k), nil
}
}
return -1, fmt.Errorf("All Luns are used")
}
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
}
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
if !exists {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
nodeName, diskNames)
return attached, nil
} else if err != nil {
return attached, err
}
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
for _, diskName := range diskNames {
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
attached[diskName] = true
}
}
}
return attached, nil
}
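GetNextDiskLun above is a straightforward scan for the lowest free LUN in the range 0..maxLUN-1. A standalone sketch of the same scan against a hypothetical set of occupied LUNs:

package main

import "fmt"

const maxLUN = 64 // mirrors the maxLUN constant above

// nextFreeLun returns the lowest unused LUN, or -1 when all are taken -
// the same scan GetNextDiskLun performs over a VM's data disks.
func nextFreeLun(usedLuns []int32) int32 {
	used := make([]bool, maxLUN)
	for _, lun := range usedLuns {
		if lun >= 0 && int(lun) < maxLUN {
			used[lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k)
		}
	}
	return -1
}

func main() {
	// Hypothetical: LUNs 0, 1 and 3 are already occupied on the node.
	fmt.Println(nextFreeLun([]int32{0, 1, 3})) // 2
}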


@ -23,6 +23,10 @@ import (
"github.com/golang/glog"
)
const (
useHTTPS = true
)
// create file share
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB int) error {
fileClient, err := az.getFileSvcClient(accountName, accountKey)
@ -55,7 +59,7 @@ func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
share := fileClient.GetShareReference(name)
return share.Delete(nil)
}
return err
return nil
}
func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {


@ -0,0 +1,129 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"path"
"strings"
"github.com/Azure/azure-sdk-for-go/arm/disk"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/golang/glog"
kwait "k8s.io/apimachinery/pkg/util/wait"
)
//ManagedDiskController : managed disk controller struct
type ManagedDiskController struct {
common *controllerCommon
}
func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
return &ManagedDiskController{common: common}, nil
}
//CreateManagedDisk : create managed disk
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
newTags := make(map[string]*string)
azureDDTag := "kubernetes-azure-dd"
newTags["created-by"] = &azureDDTag
// insert original tags to newTags
if tags != nil {
for k, v := range tags {
// Azure won't allow / (forward slash) in tags
newKey := strings.Replace(k, "/", "-", -1)
newValue := strings.Replace(v, "/", "-", -1)
newTags[newKey] = &newValue
}
}
diskSizeGB := int32(sizeGB)
model := disk.Model{
Location: &c.common.location,
Tags: &newTags,
Properties: &disk.Properties{
AccountType: disk.StorageAccountTypes(storageAccountType),
DiskSizeGB: &diskSizeGB,
CreationData: &disk.CreationData{CreateOption: disk.Empty},
}}
cancel := make(chan struct{})
respChan, errChan := c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel)
<-respChan
err := <-errChan
if err != nil {
return "", err
}
diskID := ""
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
provisionState, id, err := c.getDisk(diskName)
diskID = id
// We are waiting for provisioningState==Succeeded
// We don't want to hand off managed disks to k8s while they are
// still being provisioned; this is to avoid some race conditions
if err != nil {
return false, err
}
if strings.ToLower(provisionState) == "succeeded" {
return true, nil
}
return false, nil
})
if err != nil {
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
} else {
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
}
return diskID, nil
}
//DeleteManagedDisk : delete managed disk
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
diskName := path.Base(diskURI)
cancel := make(chan struct{})
respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel)
<-respChan
err := <-errChan
if err != nil {
return err
}
// We don't need to poll here; k8s will immediately stop referencing the disk
// and the disk will eventually be deleted - cleanly - by ARM
glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
return nil
}
// getDisk returns the disk's provisioningState, diskID, and any error encountered
func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName)
if err != nil {
return "", "", err
}
if result.Properties != nil && (*result.Properties).ProvisioningState != nil {
return *(*result.Properties).ProvisioningState, *result.ID, nil
}
return "", "", err
}

View File

@ -18,264 +18,10 @@ package azure
import (
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/volume"
)
const (
maxLUN = 64 // max number of LUNs per VM
errLeaseFailed = "AcquireDiskLeaseFailed"
errLeaseIDMissing = "LeaseIdMissing"
errContainerNotFound = "ContainerNotFound"
)
// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil {
return err
} else if !exists {
return cloudprovider.InstanceNotFound
}
disks := *vm.StorageProfile.DataDisks
disks = append(disks,
compute.DataDisk{
Name: &diskName,
Vhd: &compute.VirtualHardDisk{
URI: &diskURI,
},
Lun: &lun,
Caching: cachingMode,
CreateOption: "attach",
})
newVM := compute.VirtualMachine{
Location: vm.Location,
VirtualMachineProperties: &compute.VirtualMachineProperties{
StorageProfile: &compute.StorageProfile{
DataDisks: &disks,
},
},
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName)
az.operationPollRateLimiter.Accept()
respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
resp := <-respChan
err = <-errChan
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName)
retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName)
}
}
if err != nil {
glog.Errorf("azure attach failed, err: %v", err)
detail := err.Error()
if strings.Contains(detail, errLeaseFailed) {
// if lease cannot be acquired, immediately detach the disk and return the original error
glog.Infof("failed to acquire disk lease, try detach")
az.DetachDiskByName(diskName, diskURI, nodeName)
}
} else {
glog.V(4).Infof("azure attach succeeded")
}
return err
}
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (az *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
}
vm, exists, err := az.getVirtualMachine(nodeName)
if !exists {
// if host doesn't exist, no need to detach
glog.Warningf("Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
nodeName, diskNames)
return attached, nil
} else if err != nil {
return attached, err
}
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
for _, diskName := range diskNames {
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
attached[diskName] = true
}
}
}
return attached, nil
}
// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil || !exists {
// if host doesn't exist, no need to detach
glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName)
return nil
}
disks := *vm.StorageProfile.DataDisks
for i, disk := range disks {
if (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) {
// found the disk
glog.V(4).Infof("detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
break
}
}
newVM := compute.VirtualMachine{
Location: vm.Location,
VirtualMachineProperties: &compute.VirtualMachineProperties{
StorageProfile: &compute.StorageProfile{
DataDisks: &disks,
},
},
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName)
az.operationPollRateLimiter.Accept()
respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
resp := <-respChan
err = <-errChan
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName)
retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName)
}
}
if err != nil {
glog.Errorf("azure disk detach failed, err: %v", err)
} else {
glog.V(4).Infof("azure disk detach succeeded")
}
return err
}
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil {
return -1, err
} else if !exists {
return -1, cloudprovider.InstanceNotFound
}
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil && ((disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI)) {
// found the disk
glog.V(4).Infof("find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
return *disk.Lun, nil
}
}
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}
// GetNextDiskLun searches all vhd attachments on the host and finds an unused lun
// it returns -1 if all luns are used
func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
vm, exists, err := az.getVirtualMachine(nodeName)
if err != nil {
return -1, err
} else if !exists {
return -1, cloudprovider.InstanceNotFound
}
used := make([]bool, maxLUN)
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil {
used[*disk.Lun] = true
}
}
for k, v := range used {
if !v {
return int32(k), nil
}
}
return -1, fmt.Errorf("All Luns are used")
}
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
func (az *Cloud) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) {
var err error
accounts := []accountWithLocation{}
if len(storageAccount) > 0 {
accounts = append(accounts, accountWithLocation{Name: storageAccount})
} else {
// find a storage account
accounts, err = az.getStorageAccounts()
if err != nil {
// TODO: create a storage account and container
return "", "", 0, err
}
}
for _, account := range accounts {
glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 {
// find the access key with this account
key, err := az.getStorageAccesskey(account.Name)
if err != nil {
glog.V(2).Infof("no key found for storage account %s", account.Name)
continue
}
// create a page blob in this account's vhd container
name, uri, err := az.createVhdBlob(account.Name, key, name, int64(requestGB), nil)
if err != nil {
glog.V(2).Infof("failed to create vhd in account %s: %v", account.Name, err)
continue
}
glog.V(4).Infof("created vhd blob uri: %s", uri)
return name, uri, requestGB, err
}
}
return "", "", 0, fmt.Errorf("failed to find a matching storage account")
}
// DeleteVolume deletes a VHD blob
func (az *Cloud) DeleteVolume(name, uri string) error {
accountName, blob, err := az.getBlobNameAndAccountFromURI(uri)
if err != nil {
return fmt.Errorf("failed to parse vhd URI %v", err)
}
key, err := az.getStorageAccesskey(accountName)
if err != nil {
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
}
err = az.deleteVhdBlob(accountName, key, blob)
if err != nil {
glog.Warningf("failed to delete blob %s err: %v", uri, err)
detail := err.Error()
if strings.Contains(detail, errLeaseIDMissing) {
// disk is still being used
// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", name))
}
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", uri, accountName, blob, err)
}
glog.V(4).Infof("blob %s deleted", uri)
return nil
}
// CreateFileShare creates a file share, using a matching storage account
func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) {
var err error

View File

@ -19,7 +19,9 @@ package azure
import (
"errors"
"fmt"
"hash/crc32"
"regexp"
"strconv"
"strings"
"k8s.io/api/core/v1"
@ -293,3 +295,58 @@ func splitProviderID(providerID string) (types.NodeName, error) {
}
return types.NodeName(matches[1]), nil
}
var polyTable = crc32.MakeTable(crc32.Koopman)
// MakeCRC32 converts a string to its CRC32 checksum, returned as a decimal string
func MakeCRC32(str string) string {
crc := crc32.New(polyTable)
crc.Write([]byte(str))
hash := crc.Sum32()
return strconv.FormatUint(uint64(hash), 10)
}
// ExtractVMData extracts dataDisks, storageProfile and hardwareProfile from a VM map struct
func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
storageProfile map[string]interface{},
hardwareProfile map[string]interface{}, err error) {
props, ok := vmData["properties"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
}
storageProfile, ok = props["storageProfile"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
}
hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
}
dataDisks, ok = storageProfile["dataDisks"].([]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
}
return dataDisks, storageProfile, hardwareProfile, nil
}
// ExtractDiskData extracts provisioningState and diskState from a disk map struct
func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
fragment, ok := diskData.(map[string]interface{})
if !ok {
return "", "", fmt.Errorf("convert diskData to map error")
}
properties, ok := fragment["properties"].(map[string]interface{})
if !ok {
return "", "", fmt.Errorf("convert diskData(properties) to map error")
}
provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, provisioningState property will be there
if ref, ok := properties["diskState"]; ok {
diskState = ref.(string)
}
return provisioningState, diskState, nil
}
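As a rough, self-contained sketch of the same map-walking pattern ExtractDiskData uses, here is the assertion chain applied to a made-up ARM-style JSON fragment; the field values are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical ARM-style fragment; real responses carry many more fields.
	raw := []byte(`{"properties":{"provisioningState":"Succeeded","diskState":"Attached"}}`)

	var diskData map[string]interface{}
	if err := json.Unmarshal(raw, &diskData); err != nil {
		panic(err)
	}

	// Same assertion chain as ExtractDiskData above.
	properties, ok := diskData["properties"].(map[string]interface{})
	if !ok {
		panic("convert diskData(properties) to map error")
	}
	provisioningState, _ := properties["provisioningState"].(string)
	diskState, _ := properties["diskState"].(string)
	fmt.Println(provisioningState, diskState) // Succeeded Attached
}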

View File

@ -1,38 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"bytes"
"encoding/binary"
"github.com/rubiojr/go-vhd/vhd"
)
const (
vhdHeaderSize = vhd.VHD_HEADER_SIZE
)
func createVHDHeader(size uint64) ([]byte, error) {
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
b := new(bytes.Buffer)
err := binary.Write(b, binary.BigEndian, h)
if err != nil {
return nil, err
}
return b.Bytes(), nil
}

View File

@ -12,12 +12,14 @@ go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"azure_common.go",
"azure_dd.go",
"azure_mounter.go",
"azure_provision.go",
"vhd_util.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/util/exec:go_default_library",
@ -26,37 +28,18 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"azure_dd_test.go",
"vhd_util_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/util/exec:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
@ -69,3 +52,20 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = [
"azure_common_test.go",
"azure_dd_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/util/exec:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@ -26,54 +26,43 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
type azureDiskDetacher struct {
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}
type azureDiskAttacher struct {
host volume.VolumeHost
azureProvider azureCloudProvider
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}
var _ volume.Attacher = &azureDiskAttacher{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
const (
checkSleepDuration = time.Second
)
var _ volume.Detacher = &azureDiskDetacher{}
// acquire lock to get an lun number
var getLunMutex = keymutex.NewKeyMutex()
// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
return &azureDiskAttacher{
host: plugin.host,
azureProvider: azure,
}, nil
}
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Warningf("failed to get azure disk spec")
return "", err
}
instanceid, err := attacher.azureProvider.InstanceID(nodeName)
instanceid, err := a.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("failed to get azure instance id")
return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
@ -82,7 +71,12 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node
instanceid = instanceid[(ind + 1):]
}
lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return "", err
}
lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
if err == cloudprovider.InstanceNotFound {
// Log error and continue with attach
glog.Warningf(
@ -98,13 +92,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node
getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid)
lun, err = attacher.azureProvider.GetNextDiskLun(nodeName)
lun, err = diskController.GetNextDiskLun(nodeName)
if err != nil {
glog.Warningf("no LUN available for instance %q", nodeName)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
}
glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else {
@ -116,14 +111,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node
return strconv.Itoa(int(lun)), err
}
func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
@ -131,11 +126,16 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.DiskName] = spec
}
attachedResult, err := attacher.azureProvider.DisksAreAttached(volumeIDList, nodeName)
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return nil, err
}
attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"Error checking if volumes (%v) are attached to current node (%q). err=%v",
"azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
}
@ -144,71 +144,84 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}
// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned
func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) {
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
var err error
lun, err := strconv.Atoi(devicePath)
if err != nil {
return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath)
}
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
if len(lunStr) == 0 {
return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName)
}
io := &osIOHandler{}
scsiHostRescan(io)
lun, err := strconv.Atoi(lunStr)
if err != nil {
return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err)
}
scsiHostRescan(&osIOHandler{})
exe := exec.New()
devicePath := ""
diskName := volumeSource.DiskName
nodeName := a.plugin.host.GetHostName()
newDevicePath := ""
err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) {
glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr)
if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil {
if len(devicePath) == 0 {
glog.Warningf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr)
return false, fmt.Errorf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr)
}
glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath)
return true, nil
} else {
//Log error, if any, and continue checking periodically
glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err)
return false, nil
err = wait.Poll(1*time.Second, timeout, func() (bool, error) {
exe := exec.New()
if newDevicePath, err = findDiskByLun(lun, io, exe); err != nil {
return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
}
// did we find it?
if newDevicePath != "" {
// the current sequence k8s uses for unformatted disks (check-disk, mount, fail, mkfs.extX) hangs on
// the Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve
// the root cause on Azure.
formatIfNotFormatted(newDevicePath, *volumeSource.FSType)
return true, nil
}
return false, fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun)
})
return devicePath, err
return newDevicePath, err
}
// GetDeviceMountPath finds the volume's mount path on the node
func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
// to avoid name conflicts (similar *.vhd names)
// we hash the diskURI and use the hash as the device mount target.
// this is generalized for both managed and blob disks
// we also prefix the hash with m/b based on the disk kind
func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil
if volumeSource.Kind == nil { // this spec was constructed from info on the node
pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI)
return pdPath, nil
}
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
}
// MountDevice runs mount command on the node to mount the volume
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
mounter := attacher.plugin.host.GetMounter()
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err)
}
notMnt = true
} else {
return err
return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err)
}
}
@ -218,47 +231,27 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str
}
options := []string{}
if spec.ReadOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr)
}
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err)
}
}
return nil
}
type azureDiskDetacher struct {
mounter mount.Interface
azureProvider azureCloudProvider
}
var _ volume.Detacher = &azureDiskDetacher{}
// NewDetacher initializes a volume Detacher
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &azureDiskDetacher{
mounter: plugin.host.GetMounter(),
azureProvider: azure,
}, nil
}
// Detach detaches disk from Azure VM.
func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error {
if diskName == "" {
return fmt.Errorf("invalid disk to detach: %q", diskName)
func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error {
if diskURI == "" {
return fmt.Errorf("invalid disk to detach: %q", diskURI)
}
instanceid, err := detacher.azureProvider.InstanceID(nodeName)
instanceid, err := d.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("no instance id for node %q, skip detaching", nodeName)
return nil
@ -267,22 +260,28 @@ func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeNa
instanceid = instanceid[(ind + 1):]
}
glog.V(4).Infof("detach %v from node %q", diskName, nodeName)
err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName)
glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)
diskController, err := getDiskController(d.plugin.host)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskName, err)
return err
}
err = diskController.DetachDiskByName("", diskURI, nodeName)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
}
glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
return err
}
// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
volume := path.Base(deviceMountPath)
if err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil {
glog.Errorf("Error unmounting %q: %v", volume, err)
return err
err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter())
if err == nil {
glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
} else {
return nil
glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
}
return err
}

View File

@ -0,0 +1,342 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
libstrings "strings"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
const (
defaultFSType = "ext4"
defaultStorageAccountType = storage.StandardLRS
)
type dataDisk struct {
volume.MetricsProvider
volumeName string
diskName string
podUID types.UID
}
var (
supportedCachingModes = sets.NewString(
string(api.AzureDataDiskCachingNone),
string(api.AzureDataDiskCachingReadOnly),
string(api.AzureDataDiskCachingReadWrite))
supportedDiskKinds = sets.NewString(
string(api.AzureSharedBlobDisk),
string(api.AzureDedicatedBlobDisk),
string(api.AzureManagedDisk))
supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS")
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName)
}
// creates a unique path for disks (even if they share the same *.vhd name)
func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) {
diskUri = libstrings.ToLower(diskUri) // always lowercase the URI because users may enter it in caps.
uniqueDiskNameTemplate := "%s%s"
hashedDiskUri := azure.MakeCRC32(diskUri)
prefix := "b"
if isManaged {
prefix = "m"
}
// "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }"
diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri)
pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName)
return pdPath, nil
}
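A standalone sketch of the directory name makeGlobalPDPath produces, using only the standard library; the disk URI and plugin directory below are made up, and "mounts" stands in for mount.MountsInGlobalPDPath:

package main

import (
	"fmt"
	"hash/crc32"
	"path"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical blob-disk URI; lowercased first, as makeGlobalPDPath does.
	diskURI := strings.ToLower("https://account.blob.core.windows.net/vhds/pv-0001.vhd")

	// Same CRC32 (Koopman) hashing as azure.MakeCRC32.
	hash := strconv.FormatUint(uint64(crc32.Checksum([]byte(diskURI), crc32.MakeTable(crc32.Koopman))), 10)

	prefix := "b" // "m" for managed disks
	// Hypothetical plugin dir; the real one comes from host.GetPluginDir(...).
	pdPath := path.Join("/var/lib/kubelet/plugins/kubernetes.io/azure-disk", "mounts", prefix+hash)
	fmt.Println(pdPath)
}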
func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk {
var metricProvider volume.MetricsProvider
if podUID != "" {
metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host))
}
return &dataDisk{
MetricsProvider: metricProvider,
volumeName: volumeName,
diskName: diskName,
podUID: podUID,
}
}
func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
}
return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}
func normalizeFsType(fsType string) string {
if fsType == "" {
return defaultFSType
}
return fsType
}
func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
if kind == "" {
return v1.AzureDedicatedBlobDisk, nil
}
if !supportedDiskKinds.Has(kind) {
return "", fmt.Errorf("azureDisk - %s is not supported disk kind. Supported values are %s", kind, supportedDiskKinds.List())
}
return v1.AzureDataDiskKind(kind), nil
}
func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) {
if storageAccountType == "" {
return defaultStorageAccountType, nil
}
if !supportedStorageAccountTypes.Has(storageAccountType) {
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List())
}
return storage.SkuName(storageAccountType), nil
}
func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
if cachingMode == "" {
return v1.AzureDataDiskCachingReadWrite, nil
}
if !supportedCachingModes.Has(string(cachingMode)) {
return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List())
}
return cachingMode, nil
}
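The normalize* helpers above all follow the same shape: empty input falls back to a default, and anything outside the allow-list is rejected. A minimal sketch of that pattern with apimachinery's string sets; normalizeSku is our own name and the values are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// normalizeSku mirrors the shape of the normalize* helpers above; it is a
// hypothetical example, not part of the plugin.
func normalizeSku(sku string) (string, error) {
	supported := sets.NewString("Premium_LRS", "Standard_LRS")
	if sku == "" {
		return "Standard_LRS", nil // default, analogous to defaultStorageAccountType
	}
	if !supported.Has(sku) {
		return "", fmt.Errorf("%s is not a supported sku. Supported values are %s", sku, supported.List())
	}
	return sku, nil
}

func main() {
	for _, in := range []string{"", "Premium_LRS", "UltraSSD_LRS"} {
		out, err := normalizeSku(in)
		fmt.Printf("in=%q out=%q err=%v\n", in, out, err)
	}
}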
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
Readlink(name string) (string, error)
}
//TODO: check if priming the iscsi interface is actually needed
type osIOHandler struct{}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
func (handler *osIOHandler) Readlink(name string) (string, error) {
return os.Readlink(name)
}
// listAzureDiskPath lists the disks used by azure as resource disks and OS root in /dev/disk/azure, so callers can exclude them
func listAzureDiskPath(io ioHandler) []string {
azureDiskPath := "/dev/disk/azure/"
var azureDiskList []string
if dirs, err := io.ReadDir(azureDiskPath); err == nil {
for _, f := range dirs {
name := f.Name()
diskPath := azureDiskPath + name
if link, linkErr := io.Readlink(diskPath); linkErr == nil {
sd := link[(libstrings.LastIndex(link, "/") + 1):]
azureDiskList = append(azureDiskList, sd)
}
}
}
glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
return azureDiskList
}
func scsiHostRescan(io ioHandler) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
if err = io.WriteFile(name, data, 0666); err != nil {
glog.Warningf("failed to rescan scsi host %s", name)
}
}
} else {
glog.Warningf("failed to read %s, err %v", scsi_path, err)
}
}
func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) {
azureDisks := listAzureDiskPath(io)
return findDiskByLunWithConstraint(lun, io, exe, azureDisks)
}
// finds the device attached to the "current" node at the given LUN
func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) {
var err error
sys_path := "/sys/bus/scsi/devices"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
// look for path like /sys/bus/scsi/devices/3:0:0:1
arr := libstrings.Split(name, ":")
if len(arr) < 4 {
continue
}
// extract LUN from the path.
// LUN is the last element of the path, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
l, err := strconv.Atoi(arr[3])
if err != nil {
// unknown path format, continue to read the next one
glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
continue
}
if lun == l {
// find the matching LUN
// read vendor and model to ensure it is a VHD disk
vendor := path.Join(sys_path, name, "vendor")
model := path.Join(sys_path, name, "model")
out, err := exe.Command("cat", vendor, model).CombinedOutput()
if err != nil {
glog.V(4).Infof("azure disk - failed to cat device vendor and model, err: %v", err)
continue
}
matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", libstrings.ToUpper(string(out)))
if err != nil || !matched {
glog.V(4).Infof("azure disk - doesn't match VHD, output %v, error %v", string(out), err)
continue
}
// found a disk; make sure it is not one of the azure resource/OS disks listed above
dir := path.Join(sys_path, name, "block")
if dev, err := io.ReadDir(dir); err == nil && len(dev) > 0 {
found := false
for _, diskName := range azureDisks {
glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName)
if string(dev[0].Name()) == diskName {
found = true
break
}
}
if !found {
return "/dev/" + dev[0].Name(), nil
}
}
}
}
}
return "", err
}
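A self-contained sketch of the LUN parsing done above on /sys/bus/scsi/devices entry names of the form host:channel:target:lun; parseLun is our own helper name, not part of the plugin:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLun extracts the LUN from a scsi device name such as "3:0:0:1"
// (host:channel:target:lun), mirroring the parsing in findDiskByLunWithConstraint.
func parseLun(name string) (int, error) {
	arr := strings.Split(name, ":")
	if len(arr) < 4 {
		return -1, fmt.Errorf("unexpected scsi device name %q", name)
	}
	return strconv.Atoi(arr[3])
}

func main() {
	lun, err := parseLun("3:0:0:1")
	fmt.Println(lun, err) // 1 <nil>
}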
func formatIfNotFormatted(disk string, fstype string) {
notFormatted, err := diskLooksUnformatted(disk)
if err == nil && notFormatted {
args := []string{disk}
// Disk is unformatted so format it.
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk}
}
glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args)
runner := exec.New()
cmd := runner.Command("mkfs."+fstype, args...)
_, err := cmd.CombinedOutput()
if err == nil {
// the disk has been formatted successfully; the caller will try to mount it again.
glog.Infof("azureDisk - Disk %q successfully formatted as %q", disk, fstype)
} else {
glog.Warningf("azureDisk - format of disk %q failed: type:(%q) options:(%v) error:(%v)", disk, fstype, args, err)
}
} else {
if err != nil {
glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err)
} else {
glog.Infof("azureDisk - Disk %s already formatted, will not format", disk)
}
}
}
func diskLooksUnformatted(disk string) (bool, error) {
args := []string{"-nd", "-o", "FSTYPE", disk}
runner := exec.New()
cmd := runner.Command("lsblk", args...)
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return false, err
}
output := libstrings.TrimSpace(string(dataOut))
return output == "", nil
}
func getDiskController(host volume.VolumeHost) (DiskController, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)
if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}
func getCloud(host volume.VolumeHost) (*azure.Cloud, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)
if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}
func strFirstLetterToUpper(str string) string {
if len(str) < 2 {
return str
}
return libstrings.ToUpper(string(str[0])) + str[1:]
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -17,67 +17,62 @@ limitations under the License.
package azure_dd
import (
"fmt"
"os"
"path"
"github.com/Azure/azure-sdk-for-go/arm/compute"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
// DiskController is the interface exposed by the cloud provider implementing Disk functionality
type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error)
DeleteBlobDisk(diskUri string, wasForced bool) error
type azureDataDiskPlugin struct {
host volume.VolumeHost
volumeLocks keymutex.KeyMutex
}
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error)
DeleteManagedDisk(diskURI string) error
// Abstract interface to disk operations.
// azure cloud provider should implement it
type azureCloudProvider interface {
// Attaches the disk to the host machine.
AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
// Check if a list of volumes are attached to the node with the specified NodeName
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
// Get the LUN number of the disk that is attached to the host
GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
// Get the next available LUN number to attach a new VHD
GetNextDiskLun(nodeName types.NodeName) (int32, error)
// InstanceID returns the cloud provider ID of the specified instance.
InstanceID(nodeName types.NodeName) (string, error)
// Create a VHD blob
CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error)
CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error)
// Delete a VHD blob
DeleteVolume(name, uri string) error
DeleteVolume(diskURI string) error
}
type azureDataDiskPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
)
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
return nil
}
@ -91,7 +86,7 @@ func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, err
return "", err
}
return volumeSource.DiskName, nil
return volumeSource.DataDiskURI, nil
}
func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
@ -117,281 +112,104 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM
}
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter())
}
func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) {
// azures used directly in a pod have a ReadOnly flag set by the pod author.
// azures used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
azure, err := getVolumeSource(spec)
// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewAttacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}
fsType := "ext4"
if azure.FSType != nil {
fsType = *azure.FSType
}
cachingMode := v1.AzureDataDiskCachingNone
if azure.CachingMode != nil {
cachingMode = *azure.CachingMode
}
readOnly := false
if azure.ReadOnly != nil {
readOnly = *azure.ReadOnly
}
diskName := azure.DiskName
diskUri := azure.DataDiskURI
return &azureDiskMounter{
azureDisk: &azureDisk{
podUID: podUID,
volName: spec.Name(),
diskName: diskName,
diskUri: diskUri,
cachingMode: cachingMode,
mounter: mounter,
plugin: plugin,
},
fsType: fsType,
readOnly: readOnly,
diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}
func (plugin *azureDataDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &azureDiskUnmounter{
&azureDisk{
podUID: podUID,
volName: volName,
mounter: mounter,
plugin: plugin,
},
return &azureDiskAttacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}
return &azureDiskDetacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
azVolume := &v1.Volume{
Name: volName,
disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host)
return &azureDiskDeleter{
spec: spec,
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}
return &azureDiskProvisioner{
plugin: plugin,
options: options,
}, nil
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host)
return &azureDiskMounter{
plugin: plugin,
spec: spec,
options: options,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
disk := makeDataDisk(volName, podUID, "", plugin.host)
return &azureDiskUnmounter{
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
azureVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: sourceName,
DataDiskURI: sourceName,
},
},
}
return volume.NewSpecFromVolume(azVolume), nil
return volume.NewSpecFromVolume(azureVolume), nil
}
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
return mount.GetMountRefs(mounter, deviceMountPath)
}
type azureDisk struct {
volName string
podUID types.UID
diskName string
diskUri string
cachingMode v1.AzureDataDiskCachingMode
mounter mount.Interface
plugin *azureDataDiskPlugin
volume.MetricsNil
}
type azureDiskMounter struct {
*azureDisk
// Filesystem type, optional.
fsType string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter *mount.SafeFormatAndMount
}
var _ volume.Mounter = &azureDiskMounter{}
func (b *azureDiskMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *azureDiskMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
b.plugin.volumeLocks.LockKey(b.diskName)
defer b.plugin.volumeLocks.UnlockKey(b.diskName)
// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("DataDisk set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("IsLikelyNotMountPoint failed: %v", err)
return err
}
if !notMnt {
glog.V(4).Infof("%s is a mount point", dir)
return nil
}
globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskName)
if err := os.MkdirAll(dir, 0750); err != nil {
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(3).Infof("Azure disk volume %s mounted to %s", b.diskName, dir)
return nil
}
func makeGlobalPDPath(host volume.VolumeHost, volume string) string {
return path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volume)
}
func (azure *azureDisk) GetPath() string {
name := azureDataDiskPluginName
return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName)
}
type azureDiskUnmounter struct {
*azureDisk
}
var _ volume.Unmounter = &azureDiskUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.Errorf("Error checking if mountpoint %s: %v", dir, err)
return err
}
if notMnt {
glog.V(2).Info("Not mountpoint, deleting")
return os.Remove(dir)
}
// lock the volume (and thus wait for any concurrent SetUpAt to finish)
c.plugin.volumeLocks.LockKey(c.diskName)
defer c.plugin.volumeLocks.UnlockKey(c.diskName)
refs, err := mount.GetMountRefs(c.mounter, dir)
if err != nil {
glog.Errorf("Error getting mountrefs for %s: %v", dir, err)
return err
}
if len(refs) == 0 {
glog.Errorf("Did not find pod-mount for %s during tear down", dir)
return fmt.Errorf("%s is not mounted", dir)
}
c.diskName = path.Base(refs[0])
glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir)
// Unmount the bind-mount inside this pod
if err := c.mounter.Unmount(dir); err != nil {
glog.Errorf("Error unmounting dir %s %v", dir, err)
return err
}
notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if notMnt {
if err := os.Remove(dir); err != nil {
glog.Errorf("Error removing mountpoint %s %v", dir, err)
return err
}
}
return nil
}
func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
}
return nil, fmt.Errorf("Spec does not reference an Azure disk volume type")
}
// Return cloud provider
func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) {
azureCloudProvider, ok := cloudProvider.(*azure.Cloud)
if !ok || azureCloudProvider == nil {
return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return azureCloudProvider, nil
m := plugin.host.GetMounter()
return mount.GetMountRefs(m, deviceMountPath)
}

View File

@ -17,17 +17,11 @@ limitations under the License.
package azure_dd
import (
"fmt"
"os"
"path"
"testing"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
@ -57,121 +51,5 @@ func TestCanSupport(t *testing.T) {
}
}
const (
fakeDiskName = "foo"
fakeDiskUri = "https://azure/vhds/bar.vhd"
fakeLun = 2
)
type fakeAzureProvider struct {
}
func (fake *fakeAzureProvider) AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error {
if diskName != fakeDiskName || diskUri != fakeDiskUri || lun != fakeLun {
return fmt.Errorf("wrong disk")
}
return nil
}
func (fake *fakeAzureProvider) DetachDiskByName(diskName, diskUri, vmName string) error {
if diskName != fakeDiskName || diskUri != fakeDiskUri {
return fmt.Errorf("wrong disk")
}
return nil
}
func (fake *fakeAzureProvider) GetDiskLun(diskName, diskUri, vmName string) (int32, error) {
return int32(fakeLun), nil
}
func (fake *fakeAzureProvider) GetNextDiskLun(vmName string) (int32, error) {
return fakeLun, nil
}
func (fake *fakeAzureProvider) InstanceID(name string) (string, error) {
return "localhost", nil
}
func (fake *fakeAzureProvider) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) {
return "", "", 0, fmt.Errorf("not implemented")
}
func (fake *fakeAzureProvider) DeleteVolume(name, uri string) error {
return fmt.Errorf("not implemented")
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("azure_ddTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fs := "ext4"
ro := false
caching := v1.AzureDataDiskCachingNone
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: fakeDiskName,
DataDiskURI: fakeDiskUri,
FSType: &fs,
CachingMode: &caching,
ReadOnly: &ro,
},
},
}
mounter, err := plug.(*azureDataDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-disk/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s, should be %s", path, volPath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*azureDataDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
}
// fakeAzureProvider type was removed because none of its functions were used
// Testing mounting would require path calculation, which depends on the cloud provider; the provider is faked in the test above

View File

@ -0,0 +1,184 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
type azureDiskMounter struct {
*dataDisk
spec *volume.Spec
plugin *azureDataDiskPlugin
options volume.VolumeOptions
}
type azureDiskUnmounter struct {
*dataDisk
plugin *azureDataDiskPlugin
}
var _ volume.Unmounter = &azureDiskUnmounter{}
var _ volume.Mounter = &azureDiskMounter{}
func (m *azureDiskMounter) GetAttributes() volume.Attributes {
volumeSource, _ := getVolumeSource(m.spec)
return volume.Attributes{
ReadOnly: *volumeSource.ReadOnly,
Managed: !*volumeSource.ReadOnly,
SupportsSELinux: true,
}
}
func (m *azureDiskMounter) CanMount() error {
return nil
}
func (m *azureDiskMounter) SetUp(fsGroup *int64) error {
return m.SetUpAt(m.GetPath(), fsGroup)
}
func (m *azureDiskMounter) GetPath() string {
return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host)
}
func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
mounter := m.plugin.host.GetMounter()
volumeSource, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
return err
}
diskName := volumeSource.DiskName
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
return err
}
if !mountPoint {
// IsLikelyNotMountPoint returned false, i.e. dir is already a mount point.
return fmt.Errorf("azureDisk - %s is already mounted; refusing to set up disk %s", dir, diskName)
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Infof("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
return err
}
options := []string{"bind"}
if *volumeSource.ReadOnly {
options = append(options, "ro")
}
glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
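// The disk device is expected to have already been attached and mounted at a
// per-disk global path (computed by makeGlobalPDPath below); SetUpAt only
// bind-mounts that global path into the pod's volume directory.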
globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
if err != nil {
return err
}
mountErr := mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options)
// Everything in the following control flow is an attempt to clean up after a
// failed SetUpAt (bind mount).
if mountErr != nil {
glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr)
}
if !mountPoint {
if err = mounter.Unmount(dir); err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
}
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
}
if !mountPoint {
// not cool. leave for next sync loop.
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). Will try again next sync loop.", diskName, dir, mountErr)
}
}
if err = os.Remove(dir); err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
}
glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, err, mountErr)
return mountErr
}
if !*volumeSource.ReadOnly {
volume.SetVolumeOwnership(m, fsGroup)
}
glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
return nil
}
func (u *azureDiskUnmounter) TearDown() error {
return u.TearDownAt(u.GetPath())
}
func (u *azureDiskUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
glog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
mounter := u.plugin.host.GetMounter()
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err)
}
if mountPoint {
if err := os.Remove(dir); err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err)
}
}
if err := mounter.Unmount(dir); err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err)
}
mountPoint, err = mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - TearTownAt:IsLikelyNotMountPoint check failed: %v", err)
}
if mountPoint {
return os.Remove(dir)
}
return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir")
}
func (u *azureDiskUnmounter) GetPath() string {
return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host)
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -20,147 +20,182 @@ import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
type azureDiskProvisioner struct {
plugin *azureDataDiskPlugin
options volume.VolumeOptions
}
type azureDiskDeleter struct {
*azureDisk
azureProvider azureCloudProvider
}
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
return plugin.newDeleterInternal(spec, azure)
}
func (plugin *azureDataDiskPlugin) newDeleterInternal(spec *volume.Spec, azure azureCloudProvider) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk == nil {
return nil, fmt.Errorf("invalid PV spec")
}
diskName := spec.PersistentVolume.Spec.AzureDisk.DiskName
diskUri := spec.PersistentVolume.Spec.AzureDisk.DataDiskURI
return &azureDiskDeleter{
azureDisk: &azureDisk{
volName: spec.Name(),
diskName: diskName,
diskUri: diskUri,
plugin: plugin,
},
azureProvider: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}
return plugin.newProvisionerInternal(options, azure)
}
func (plugin *azureDataDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) {
return &azureDiskProvisioner{
azureDisk: &azureDisk{
plugin: plugin,
},
azureProvider: azure,
options: options,
}, nil
}
var _ volume.Deleter = &azureDiskDeleter{}
func (d *azureDiskDeleter) GetPath() string {
name := azureDataDiskPluginName
return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName)
}
func (d *azureDiskDeleter) Delete() error {
glog.V(4).Infof("deleting volume %s", d.diskUri)
return d.azureProvider.DeleteVolume(d.diskName, d.diskUri)
}
type azureDiskProvisioner struct {
*azureDisk
azureProvider azureCloudProvider
options volume.VolumeOptions
*dataDisk
spec *volume.Spec
plugin *azureDataDiskPlugin
}
var _ volume.Provisioner = &azureDiskProvisioner{}
var _ volume.Deleter = &azureDiskDeleter{}
func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
func (d *azureDiskDeleter) GetPath() string {
return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host)
}
func (d *azureDiskDeleter) Delete() error {
volumeSource, err := getVolumeSource(d.spec)
if err != nil {
return err
}
var sku, location, account string
diskController, err := getDiskController(d.plugin.host)
if err != nil {
return err
}
wasStandAlone := (*volumeSource.Kind != v1.AzureSharedBlobDisk)
managed := (*volumeSource.Kind == v1.AzureManagedDisk)
if managed {
return diskController.DeleteManagedDisk(volumeSource.DataDiskURI)
}
return diskController.DeleteBlobDisk(volumeSource.DataDiskURI, wasStandAlone)
}
func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
}
supportedModes := p.plugin.GetAccessModes()
// perform static validation first
if p.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
}
if len(p.options.PVC.Spec.AccessModes) > 1 {
return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin")
}
if len(p.options.PVC.Spec.AccessModes) == 1 {
if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes)
}
}
var (
location, account string
storageAccountType, fsType string
cachingMode v1.AzureDataDiskCachingMode
strKind string
err error
)
// maxLength = 79 - (4 for ".vhd") = 75
name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75)
capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range a.options.Parameters {
for k, v := range p.options.Parameters {
switch strings.ToLower(k) {
case "skuname":
sku = v
storageAccountType = v
case "location":
location = v
case "storageaccount":
account = v
case "storageaccounttype":
storageAccountType = v
case "kind":
strKind = v
case "cachingmode":
cachingMode = v1.AzureDataDiskCachingMode(v)
case "fstype":
fsType = strings.ToLower(v)
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
}
}
// TODO: implement c.options.ProvisionerSelector parsing
if a.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
}
diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB)
// normalize values
fsType = normalizeFsType(fsType)
skuName, err := normalizeStorageAccountType(storageAccountType)
if err != nil {
return nil, err
}
kind, err := normalizeKind(strFirstLetterToUpper(strKind))
if err != nil {
return nil, err
}
if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
return nil, err
}
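// Example (hypothetical) StorageClass parameters accepted by the parameters
// loop above; keys are matched case-insensitively and the values are
// validated by the normalize* helpers and by the cloud provider:
//
//	skuName:     Standard_LRS
//	kind:        Managed   (or Shared / Dedicated for blob-based disks)
//	cachingMode: None
//	fsType:      ext4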
diskController, err := getDiskController(p.plugin.host)
if err != nil {
return nil, err
}
// create disk
diskURI := ""
if kind == v1.AzureManagedDisk {
diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags))
if err != nil {
return nil, err
}
} else {
forceStandAlone := (kind == v1.AzureDedicatedBlobDisk)
if kind == v1.AzureDedicatedBlobDisk {
if location != "" && account != "" {
// use dedicated kind (by default) for compatibility
_, diskURI, _, err = diskController.CreateVolume(name, account, skuName, location, requestGB)
if err != nil {
return nil, err
}
} else {
if location != "" || account != "" {
return nil, fmt.Errorf("AzureDisk - location(%s) and account(%s) must be both empty or specified for dedicated kind, only one value specified is not allowed",
location, account)
}
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone)
if err != nil {
return nil, err
}
}
} else {
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone)
if err != nil {
return nil, err
}
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: a.options.PVName,
Name: p.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "azure-disk-dynamic-provisioner",
"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
AccessModes: a.options.PVC.Spec.AccessModes,
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
AccessModes: supportedModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: diskUri,
CachingMode: &cachingMode,
DiskName: name,
DataDiskURI: diskURI,
Kind: &kind,
FSType: &fsType,
},
},
},

View File

@ -1,145 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/exec"
)
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
Readlink(name string) (string, error)
}
type osIOHandler struct{}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
func (handler *osIOHandler) Readlink(name string) (string, error) {
return os.Readlink(name)
}
// exclude those used by azure as resource and OS root in /dev/disk/azure
func listAzureDiskPath(io ioHandler) []string {
azureDiskPath := "/dev/disk/azure/"
var azureDiskList []string
if dirs, err := io.ReadDir(azureDiskPath); err == nil {
for _, f := range dirs {
name := f.Name()
diskPath := azureDiskPath + name
if link, linkErr := io.Readlink(diskPath); linkErr == nil {
sd := link[(strings.LastIndex(link, "/") + 1):]
azureDiskList = append(azureDiskList, sd)
}
}
}
glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
return azureDiskList
}
// given a LUN find the VHD device path like /dev/sdd
// exclude those disks used by Azure resources and OS root
func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) {
azureDisks := listAzureDiskPath(io)
return findDiskByLunWithConstraint(lun, io, exe, azureDisks)
}
// look for device /dev/sdX and validate it is a VHD
// return empty string if no disk is found
func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) {
var err error
sys_path := "/sys/bus/scsi/devices"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
// look for path like /sys/bus/scsi/devices/3:0:0:1
arr := strings.Split(name, ":")
if len(arr) < 4 {
continue
}
// extract LUN from the path.
// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
l, err := strconv.Atoi(arr[3])
if err != nil {
// unknown path format, continue to read the next one
glog.Errorf("failed to parse lun from %v (%v), err %v", arr[3], name, err)
continue
}
if lun == l {
// find the matching LUN
// read vendor and model to ensure it is a VHD disk
vendor := path.Join(sys_path, name, "vendor")
model := path.Join(sys_path, name, "model")
out, err := exe.Command("cat", vendor, model).CombinedOutput()
if err != nil {
glog.Errorf("failed to cat device vendor and model, err: %v", err)
continue
}
matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", strings.ToUpper(string(out)))
if err != nil || !matched {
glog.V(4).Infof("doesn't match VHD, output %v, error %v", string(out), err)
continue
}
// find a disk, validate name
dir := path.Join(sys_path, name, "block")
if dev, err := io.ReadDir(dir); err == nil {
found := false
for _, diskName := range azureDisks {
glog.V(12).Infof("validating disk %q with sys disk %q", dev[0].Name(), diskName)
if string(dev[0].Name()) == diskName {
found = true
break
}
}
if !found {
return "/dev/" + dev[0].Name(), nil
}
}
}
}
}
return "", err
}
// rescan scsi bus
func scsiHostRescan(io ioHandler) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
if err = io.WriteFile(name, data, 0666); err != nil {
glog.Errorf("failed to rescan scsi host %s", name)
}
}
} else {
glog.Errorf("failed to read %s, err %v", scsi_path, err)
}
}

View File

@ -716,16 +716,21 @@ func createPD(zone string) (string, error) {
} else if TestContext.Provider == "azure" {
pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
azureCloud, err := GetAzureCloud()
if err != nil {
return "", err
}
_, diskUri, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */)
if azureCloud.BlobDiskController == nil {
return "", fmt.Errorf("BlobDiskController is nil, it's not expected.")
}
diskUri, err := azureCloud.BlobDiskController.CreateBlobDisk(pdName, "standard_lrs", 1, false)
if err != nil {
return "", err
}
return diskUri, nil
} else {
return "", fmt.Errorf("provider does not support volume creation")
}
@ -770,8 +775,11 @@ func deletePD(pdName string) error {
if err != nil {
return err
}
if azureCloud.BlobDiskController == nil {
return fmt.Errorf("BlobDiskController is nil, it's not expected.")
}
diskName := pdName[(strings.LastIndex(pdName, "/") + 1):]
err = azureCloud.DeleteVolume(diskName, pdName)
err = azureCloud.BlobDiskController.DeleteBlobDisk(diskName, false)
if err != nil {
Logf("failed to delete Azure volume %q: %v", pdName, err)
return err

1
vendor/BUILD vendored
View File

@ -19,6 +19,7 @@ filegroup(
"//vendor/cloud.google.com/go/internal:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:all-srcs",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:all-srcs",

View File

@ -0,0 +1,40 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"client.go",
"disks.go",
"models.go",
"snapshots.go",
"version.go",
],
tags = ["automanaged"],
deps = [
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/date:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/validation:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,53 @@
// Package disk implements the Azure ARM Disk service API version
// 2016-04-30-preview.
//
// The Disk Resource Provider Client.
package disk
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
)
const (
// DefaultBaseURI is the default URI used for the service Disk
DefaultBaseURI = "https://management.azure.com"
)
// ManagementClient is the base client for Disk.
type ManagementClient struct {
autorest.Client
BaseURI string
SubscriptionID string
}
// New creates an instance of the ManagementClient client.
func New(subscriptionID string) ManagementClient {
return NewWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewWithBaseURI creates an instance of the ManagementClient client.
func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
return ManagementClient{
Client: autorest.NewClientWithUserAgent(UserAgent()),
BaseURI: baseURI,
SubscriptionID: subscriptionID,
}
}

View File

@ -0,0 +1,728 @@
package disk
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// DisksClient is the Disk Resource Provider Client.
type DisksClient struct {
ManagementClient
}
// NewDisksClient creates an instance of the DisksClient client.
func NewDisksClient(subscriptionID string) DisksClient {
return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewDisksClientWithBaseURI creates an instance of the DisksClient client.
func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient {
return DisksClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a disk. This method may poll for
// completion. Polling can be canceled by passing the cancel channel argument.
// The channel will be used to cancel polling and any outstanding HTTP
// requests.
//
// resourceGroupName is the name of the resource group. diskName is the name of
// the disk within the given subscription and resource group. diskParameter is
// disk object supplied in the body of the Put disk operation.
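//
// A minimal, hypothetical usage sketch (authentication setup omitted; the
// subscription, resource group, disk name, location and size below are
// placeholders):
//
//	client := NewDisksClient("<subscription-id>")
//	model := Model{
//		Location: to.StringPtr("eastus"),
//		Properties: &Properties{
//			AccountType:  StandardLRS,
//			DiskSizeGB:   to.Int32Ptr(10),
//			CreationData: &CreationData{CreateOption: Empty},
//		},
//	}
//	resultChan, errChan := client.CreateOrUpdate("my-resource-group", "my-disk", model, nil)
//	result, err := <-resultChan, <-errChan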
func (client DisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter Model, cancel <-chan struct{}) (<-chan Model, <-chan error) {
resultChan := make(chan Model, 1)
errChan := make(chan error, 1)
if err := validation.Validate([]validation.Validation{
{TargetValue: diskParameter,
Constraints: []validation.Constraint{{Target: "diskParameter.Properties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData.ImageReference", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
}},
{Target: "diskParameter.Properties.EncryptionSettings", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
}},
{Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
}},
}},
}}}}}); err != nil {
errChan <- validation.NewErrorWithValidationError(err, "disk.DisksClient", "CreateOrUpdate")
close(errChan)
close(resultChan)
return resultChan, errChan
}
go func() {
var err error
var result Model
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.CreateOrUpdatePreparer(resourceGroupName, diskName, diskParameter, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client DisksClient) CreateOrUpdatePreparer(resourceGroupName string, diskName string, diskParameter Model, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithJSON(diskParameter),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Model, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes a disk. This method may poll for completion. Polling can be
// canceled by passing the cancel channel argument. The channel will be used to
// cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. diskName is the name of
// the disk within the given subscription and resource group.
func (client DisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) {
resultChan := make(chan OperationStatusResponse, 1)
errChan := make(chan error, 1)
go func() {
var err error
var result OperationStatusResponse
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.DeletePreparer(resourceGroupName, diskName, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// DeletePreparer prepares the Delete request.
func (client DisksClient) DeletePreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) DeleteSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client DisksClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Get gets information about a disk.
//
// resourceGroupName is the name of the resource group. diskName is the name of
// the disk within the given subscription and resource group.
func (client DisksClient) Get(resourceGroupName string, diskName string) (result Model, err error) {
req, err := client.GetPreparer(resourceGroupName, diskName)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client DisksClient) GetPreparer(resourceGroupName string, diskName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client DisksClient) GetResponder(resp *http.Response) (result Model, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GrantAccess grants access to a disk. This method may poll for completion.
// Polling can be canceled by passing the cancel channel argument. The channel
// will be used to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. diskName is the name of
// the disk within the given subscription and resource group. grantAccessData
// is access data object supplied in the body of the get disk access operation.
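//
// A minimal, hypothetical usage sketch (client is a DisksClient; values are
// placeholders):
//
//	data := GrantAccessData{Access: Read, DurationInSeconds: to.Int32Ptr(3600)}
//	uriChan, errChan := client.GrantAccess("my-resource-group", "my-disk", data, nil)
//	accessURI, err := <-uriChan, <-errChan
//	// on success, accessURI.AccessSAS holds a time-limited SAS URI for the disk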
func (client DisksClient) GrantAccess(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) {
resultChan := make(chan AccessURI, 1)
errChan := make(chan error, 1)
if err := validation.Validate([]validation.Validation{
{TargetValue: grantAccessData,
Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
errChan <- validation.NewErrorWithValidationError(err, "disk.DisksClient", "GrantAccess")
close(errChan)
close(resultChan)
return resultChan, errChan
}
go func() {
var err error
var result AccessURI
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.GrantAccessPreparer(resourceGroupName, diskName, grantAccessData, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", nil, "Failure preparing request")
return
}
resp, err := client.GrantAccessSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", resp, "Failure sending request")
return
}
result, err = client.GrantAccessResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// GrantAccessPreparer prepares the GrantAccess request.
func (client DisksClient) GrantAccessPreparer(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters),
autorest.WithJSON(grantAccessData),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// GrantAccessSender sends the GrantAccess request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) GrantAccessSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// GrantAccessResponder handles the response to the GrantAccess request. The method always
// closes the http.Response Body.
func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists all the disks under a subscription.
func (client DisksClient) List() (result ListType, err error) {
req, err := client.ListPreparer()
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure sending request")
return
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client DisksClient) ListPreparer() (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client DisksClient) ListResponder(resp *http.Response) (result ListType, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListNextResults retrieves the next set of results, if any.
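//
// A hypothetical pagination loop over all disks in the subscription (client
// is a DisksClient):
//
//	page, err := client.List()
//	for err == nil {
//		if page.Value != nil {
//			// process *page.Value
//		}
//		if page.NextLink == nil || len(*page.NextLink) == 0 {
//			break
//		}
//		page, err = client.ListNextResults(page)
//	}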
func (client DisksClient) ListNextResults(lastResults ListType) (result ListType, err error) {
req, err := lastResults.ListTypePreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "disk.DisksClient", "List", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroup lists all the disks under a resource group.
//
// resourceGroupName is the name of the resource group.
func (client DisksClient) ListByResourceGroup(resourceGroupName string) (result ListType, err error) {
req, err := client.ListByResourceGroupPreparer(resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client DisksClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result ListType, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByResourceGroupNextResults retrieves the next set of results, if any.
func (client DisksClient) ListByResourceGroupNextResults(lastResults ListType) (result ListType, err error) {
req, err := lastResults.ListTypePreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure responding to next results request")
}
return
}
// RevokeAccess revokes access to a disk. This method may poll for completion.
// Polling can be canceled by passing the cancel channel argument. The channel
// will be used to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. diskName is the name of
// the disk within the given subscription and resource group.
func (client DisksClient) RevokeAccess(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) {
resultChan := make(chan OperationStatusResponse, 1)
errChan := make(chan error, 1)
go func() {
var err error
var result OperationStatusResponse
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.RevokeAccessPreparer(resourceGroupName, diskName, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", nil, "Failure preparing request")
return
}
resp, err := client.RevokeAccessSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", resp, "Failure sending request")
return
}
result, err = client.RevokeAccessResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// RevokeAccessPreparer prepares the RevokeAccess request.
func (client DisksClient) RevokeAccessPreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// RevokeAccessSender sends the RevokeAccess request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) RevokeAccessSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
// closes the http.Response Body.
func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Update updates (patches) a disk. This method may poll for completion.
// Polling can be canceled by passing the cancel channel argument. The channel
// will be used to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. diskName is the name of
// the disk within the given subscription and resource group. diskParameter is
// disk object supplied in the body of the Patch disk operation.
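//
// A minimal, hypothetical sketch that patches the disk size (client is a
// DisksClient; values are placeholders):
//
//	update := UpdateType{UpdateProperties: &UpdateProperties{DiskSizeGB: to.Int32Ptr(20)}}
//	resultChan, errChan := client.Update("my-resource-group", "my-disk", update, nil)
//	result, err := <-resultChan, <-errChan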
func (client DisksClient) Update(resourceGroupName string, diskName string, diskParameter UpdateType, cancel <-chan struct{}) (<-chan Model, <-chan error) {
resultChan := make(chan Model, 1)
errChan := make(chan error, 1)
go func() {
var err error
var result Model
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.UpdatePreparer(resourceGroupName, diskName, diskParameter, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// UpdatePreparer prepares the Update request.
func (client DisksClient) UpdatePreparer(resourceGroupName string, diskName string, diskParameter UpdateType, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithJSON(diskParameter),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) UpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client DisksClient) UpdateResponder(resp *http.Response) (result Model, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,278 @@
package disk
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"net/http"
)
// AccessLevel enumerates the values for access level.
type AccessLevel string
const (
// None specifies the none state for access level.
None AccessLevel = "None"
// Read specifies the read state for access level.
Read AccessLevel = "Read"
)
// CreateOption enumerates the values for create option.
type CreateOption string
const (
// Attach specifies the attach state for create option.
Attach CreateOption = "Attach"
// Copy specifies the copy state for create option.
Copy CreateOption = "Copy"
// Empty specifies the empty state for create option.
Empty CreateOption = "Empty"
// FromImage specifies the from image state for create option.
FromImage CreateOption = "FromImage"
// Import specifies the import state for create option.
Import CreateOption = "Import"
// Restore specifies the restore state for create option.
Restore CreateOption = "Restore"
)
// OperatingSystemTypes enumerates the values for operating system types.
type OperatingSystemTypes string
const (
// Linux specifies the linux state for operating system types.
Linux OperatingSystemTypes = "Linux"
// Windows specifies the windows state for operating system types.
Windows OperatingSystemTypes = "Windows"
)
// StorageAccountTypes enumerates the values for storage account types.
type StorageAccountTypes string
const (
// PremiumLRS specifies the premium lrs state for storage account types.
PremiumLRS StorageAccountTypes = "Premium_LRS"
// StandardLRS specifies the standard lrs state for storage account types.
StandardLRS StorageAccountTypes = "Standard_LRS"
)
// AccessURI is a disk access SAS uri.
type AccessURI struct {
autorest.Response `json:"-"`
*AccessURIOutput `json:"properties,omitempty"`
}
// AccessURIOutput is azure properties, including output.
type AccessURIOutput struct {
*AccessURIRaw `json:"output,omitempty"`
}
// AccessURIRaw is the object that gets 'bubbled up' through flattening.
type AccessURIRaw struct {
AccessSAS *string `json:"accessSAS,omitempty"`
}
// APIError is api error.
type APIError struct {
Details *[]APIErrorBase `json:"details,omitempty"`
Innererror *InnerError `json:"innererror,omitempty"`
Code *string `json:"code,omitempty"`
Target *string `json:"target,omitempty"`
Message *string `json:"message,omitempty"`
}
// APIErrorBase is api error base.
type APIErrorBase struct {
Code *string `json:"code,omitempty"`
Target *string `json:"target,omitempty"`
Message *string `json:"message,omitempty"`
}
// CreationData is data used when creating a disk.
type CreationData struct {
CreateOption CreateOption `json:"createOption,omitempty"`
StorageAccountID *string `json:"storageAccountId,omitempty"`
ImageReference *ImageDiskReference `json:"imageReference,omitempty"`
SourceURI *string `json:"sourceUri,omitempty"`
SourceResourceID *string `json:"sourceResourceId,omitempty"`
}
// EncryptionSettings is encryption settings for disk or snapshot
type EncryptionSettings struct {
Enabled *bool `json:"enabled,omitempty"`
DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"`
KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"`
}
// GrantAccessData is data used for requesting a SAS.
type GrantAccessData struct {
Access AccessLevel `json:"access,omitempty"`
DurationInSeconds *int32 `json:"durationInSeconds,omitempty"`
}
// ImageDiskReference is the source image used for creating the disk.
type ImageDiskReference struct {
ID *string `json:"id,omitempty"`
Lun *int32 `json:"lun,omitempty"`
}
// InnerError is inner error details.
type InnerError struct {
Exceptiontype *string `json:"exceptiontype,omitempty"`
Errordetail *string `json:"errordetail,omitempty"`
}
// KeyVaultAndKeyReference is key Vault Key Url and vault id of KeK, KeK is
// optional and when provided is used to unwrap the encryptionKey
type KeyVaultAndKeyReference struct {
SourceVault *SourceVault `json:"sourceVault,omitempty"`
KeyURL *string `json:"keyUrl,omitempty"`
}
// KeyVaultAndSecretReference is key Vault Secret Url and vault id of the
// encryption key
type KeyVaultAndSecretReference struct {
SourceVault *SourceVault `json:"sourceVault,omitempty"`
SecretURL *string `json:"secretUrl,omitempty"`
}
// ListType is the List Disks operation response.
type ListType struct {
autorest.Response `json:"-"`
Value *[]Model `json:"value,omitempty"`
NextLink *string `json:"nextLink,omitempty"`
}
// ListTypePreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client ListType) ListTypePreparer() (*http.Request, error) {
if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(client.NextLink)))
}
// Model is disk resource.
type Model struct {
autorest.Response `json:"-"`
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
Type *string `json:"type,omitempty"`
Location *string `json:"location,omitempty"`
Tags *map[string]*string `json:"tags,omitempty"`
*Properties `json:"properties,omitempty"`
}
// OperationStatusResponse is operation status response
type OperationStatusResponse struct {
autorest.Response `json:"-"`
Name *string `json:"name,omitempty"`
Status *string `json:"status,omitempty"`
StartTime *date.Time `json:"startTime,omitempty"`
EndTime *date.Time `json:"endTime,omitempty"`
Error *APIError `json:"error,omitempty"`
}
// Properties is disk resource properties.
type Properties struct {
AccountType StorageAccountTypes `json:"accountType,omitempty"`
TimeCreated *date.Time `json:"timeCreated,omitempty"`
OsType OperatingSystemTypes `json:"osType,omitempty"`
CreationData *CreationData `json:"creationData,omitempty"`
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"`
OwnerID *string `json:"ownerId,omitempty"`
ProvisioningState *string `json:"provisioningState,omitempty"`
}
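// newEmptyPremiumDiskSketch is an illustrative sketch, not part of the
// generated API surface: it shows how Model, Properties and CreationData
// above compose when describing a new, empty 128 GB premium managed disk.
// The region and the Empty CreateOption constant (defined earlier in this
// package) are assumptions for the example.
func newEmptyPremiumDiskSketch() Model {
	location := "eastus" // hypothetical region
	sizeGB := int32(128)
	return Model{
		Location: &location,
		Properties: &Properties{
			AccountType: PremiumLRS,
			CreationData: &CreationData{
				CreateOption: Empty, // assumed CreateOption constant for an empty disk
			},
			DiskSizeGB: &sizeGB,
		},
	}
}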
// Resource is the Resource model definition.
type Resource struct {
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
Type *string `json:"type,omitempty"`
Location *string `json:"location,omitempty"`
Tags *map[string]*string `json:"tags,omitempty"`
}
// ResourceUpdate is the Resource model definition.
type ResourceUpdate struct {
Tags *map[string]*string `json:"tags,omitempty"`
}
// Snapshot is snapshot resource.
type Snapshot struct {
autorest.Response `json:"-"`
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
Type *string `json:"type,omitempty"`
Location *string `json:"location,omitempty"`
Tags *map[string]*string `json:"tags,omitempty"`
*Properties `json:"properties,omitempty"`
}
// SnapshotList is the List Snapshots operation response.
type SnapshotList struct {
autorest.Response `json:"-"`
Value *[]Snapshot `json:"value,omitempty"`
NextLink *string `json:"nextLink,omitempty"`
}
// SnapshotListPreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client SnapshotList) SnapshotListPreparer() (*http.Request, error) {
if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(client.NextLink)))
}
// SnapshotUpdate is snapshot update resource.
type SnapshotUpdate struct {
Tags *map[string]*string `json:"tags,omitempty"`
*UpdateProperties `json:"properties,omitempty"`
}
// SourceVault is the vault whose id is an Azure Resource Manager resource id
// in the form
// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}
type SourceVault struct {
ID *string `json:"id,omitempty"`
}
// UpdateProperties is disk resource update properties.
type UpdateProperties struct {
AccountType StorageAccountTypes `json:"accountType,omitempty"`
OsType OperatingSystemTypes `json:"osType,omitempty"`
CreationData *CreationData `json:"creationData,omitempty"`
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"`
}
// UpdateType is disk update resource.
type UpdateType struct {
Tags *map[string]*string `json:"tags,omitempty"`
*UpdateProperties `json:"properties,omitempty"`
}


@ -0,0 +1,733 @@
package disk
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// SnapshotsClient is the Disk Resource Provider Client.
type SnapshotsClient struct {
ManagementClient
}
// NewSnapshotsClient creates an instance of the SnapshotsClient client.
func NewSnapshotsClient(subscriptionID string) SnapshotsClient {
return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient
// client.
func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient {
return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a snapshot. This method may poll for
// completion. Polling can be canceled by passing the cancel channel argument.
// The channel will be used to cancel polling and any outstanding HTTP
// requests.
//
// resourceGroupName is the name of the resource group. snapshotName is the
// name of the snapshot within the given subscription and resource group.
// snapshot is the snapshot object supplied in the body of the Put Snapshot operation.
func (client SnapshotsClient) CreateOrUpdate(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) {
resultChan := make(chan Snapshot, 1)
errChan := make(chan error, 1)
if err := validation.Validate([]validation.Validation{
{TargetValue: snapshot,
Constraints: []validation.Constraint{{Target: "snapshot.Properties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData.ImageReference", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
}},
{Target: "snapshot.Properties.EncryptionSettings", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
}},
{Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
}},
}},
}}}}}); err != nil {
errChan <- validation.NewErrorWithValidationError(err, "disk.SnapshotsClient", "CreateOrUpdate")
close(errChan)
close(resultChan)
return resultChan, errChan
}
go func() {
var err error
var result Snapshot
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.CreateOrUpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client SnapshotsClient) CreateOrUpdatePreparer(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithJSON(snapshot),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
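// createSnapshotFromDiskSketch is an illustrative sketch, not part of the
// generated client: it shows the usual way the channel-based CreateOrUpdate
// above is consumed. Each channel delivers exactly one value (the final
// snapshot and the terminal error), and closing cancel would abort polling.
// The resource group, snapshot name, region and the Copy CreateOption
// constant are assumptions for the example.
func createSnapshotFromDiskSketch(client SnapshotsClient, sourceDiskID string) (Snapshot, error) {
	cancel := make(chan struct{})
	defer close(cancel) // safe here: the operation has already completed by the time this runs

	location := "eastus" // hypothetical region
	snap := Snapshot{
		Location: &location,
		Properties: &Properties{
			CreationData: &CreationData{
				CreateOption:     Copy, // assumed constant: copy the snapshot from an existing managed disk
				SourceResourceID: &sourceDiskID,
			},
		},
	}
	resultChan, errChan := client.CreateOrUpdate("my-resource-group", "my-snapshot", snap, cancel)
	return <-resultChan, <-errChan
}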
// Delete deletes a snapshot. This method may poll for completion. Polling can
// be canceled by passing the cancel channel argument. The channel will be used
// to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. snapshotName is the
// name of the snapshot within the given subscription and resource group.
func (client SnapshotsClient) Delete(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) {
resultChan := make(chan OperationStatusResponse, 1)
errChan := make(chan error, 1)
go func() {
var err error
var result OperationStatusResponse
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.DeletePreparer(resourceGroupName, snapshotName, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// DeletePreparer prepares the Delete request.
func (client SnapshotsClient) DeletePreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Get gets information about a snapshot.
//
// resourceGroupName is the name of the resource group. snapshotName is the
// name of the snapshot within the given subscription and resource group.
func (client SnapshotsClient) Get(resourceGroupName string, snapshotName string) (result Snapshot, err error) {
req, err := client.GetPreparer(resourceGroupName, snapshotName)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client SnapshotsClient) GetPreparer(resourceGroupName string, snapshotName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GrantAccess grants access to a snapshot. This method may poll for
// completion. Polling can be canceled by passing the cancel channel argument.
// The channel will be used to cancel polling and any outstanding HTTP
// requests.
//
// resourceGroupName is the name of the resource group. snapshotName is the
// name of the snapshot within the given subscription and resource group.
// grantAccessData is the access data object supplied in the body of the Get
// Snapshot Access operation.
func (client SnapshotsClient) GrantAccess(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) {
resultChan := make(chan AccessURI, 1)
errChan := make(chan error, 1)
if err := validation.Validate([]validation.Validation{
{TargetValue: grantAccessData,
Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
errChan <- validation.NewErrorWithValidationError(err, "disk.SnapshotsClient", "GrantAccess")
close(errChan)
close(resultChan)
return resultChan, errChan
}
go func() {
var err error
var result AccessURI
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.GrantAccessPreparer(resourceGroupName, snapshotName, grantAccessData, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", nil, "Failure preparing request")
return
}
resp, err := client.GrantAccessSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", resp, "Failure sending request")
return
}
result, err = client.GrantAccessResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// GrantAccessPreparer prepares the GrantAccess request.
func (client SnapshotsClient) GrantAccessPreparer(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters),
autorest.WithJSON(grantAccessData),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// GrantAccessSender sends the GrantAccess request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) GrantAccessSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// GrantAccessResponder handles the response to the GrantAccess request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
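// exportSnapshotSASSketch is an illustrative sketch, not part of the generated
// client: it pairs GrantAccess above with RevokeAccess defined later in this
// file, asking for a one-hour read-only SAS URI on a snapshot and revoking it
// again. In a real caller the blob copy would happen between the two calls.
// The Read AccessLevel constant and the use of hypothetical resource names are
// assumptions for the example.
func exportSnapshotSASSketch(client SnapshotsClient, resourceGroup, snapshotName string) (string, error) {
	cancel := make(chan struct{})
	defer close(cancel)

	duration := int32(3600) // SAS lifetime in seconds
	grant := GrantAccessData{
		Access:            Read, // assumed AccessLevel constant for read-only access
		DurationInSeconds: &duration,
	}
	accessChan, grantErrChan := client.GrantAccess(resourceGroup, snapshotName, grant, cancel)
	access, err := <-accessChan, <-grantErrChan
	if err != nil {
		return "", err
	}

	sas := ""
	if access.AccessURIOutput != nil && access.AccessURIRaw != nil && access.AccessSAS != nil {
		sas = *access.AccessSAS // promoted through the embedded AccessURIOutput/AccessURIRaw chain
	}

	// ... copy the snapshot contents using the SAS URI here ...

	_, revokeErrChan := client.RevokeAccess(resourceGroup, snapshotName, cancel)
	return sas, <-revokeErrChan
}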
// List lists snapshots under a subscription.
func (client SnapshotsClient) List() (result SnapshotList, err error) {
req, err := client.ListPreparer()
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure sending request")
return
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client SnapshotsClient) ListPreparer() (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListNextResults retrieves the next set of results, if any.
func (client SnapshotsClient) ListNextResults(lastResults SnapshotList) (result SnapshotList, err error) {
req, err := lastResults.SnapshotListPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure responding to next results request")
}
return
}
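// listAllSnapshotsSketch is an illustrative sketch, not part of the generated
// client: it shows the usual pagination loop over List and ListNextResults
// above, following NextLink until no further pages remain.
func listAllSnapshotsSketch(client SnapshotsClient) ([]Snapshot, error) {
	var all []Snapshot
	page, err := client.List()
	for err == nil {
		if page.Value != nil {
			all = append(all, *page.Value...)
		}
		if page.NextLink == nil || len(*page.NextLink) == 0 {
			break // SnapshotListPreparer would also return nil here; stop paging
		}
		page, err = client.ListNextResults(page)
	}
	return all, err
}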
// ListByResourceGroup lists snapshots under a resource group.
//
// resourceGroupName is the name of the resource group.
func (client SnapshotsClient) ListByResourceGroup(resourceGroupName string) (result SnapshotList, err error) {
req, err := client.ListByResourceGroupPreparer(resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client SnapshotsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByResourceGroupNextResults retrieves the next set of results, if any.
func (client SnapshotsClient) ListByResourceGroupNextResults(lastResults SnapshotList) (result SnapshotList, err error) {
req, err := lastResults.SnapshotListPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to next results request")
}
return
}
// RevokeAccess revokes access to a snapshot. This method may poll for
// completion. Polling can be canceled by passing the cancel channel argument.
// The channel will be used to cancel polling and any outstanding HTTP
// requests.
//
// resourceGroupName is the name of the resource group. snapshotName is the
// name of the snapshot within the given subscription and resource group.
func (client SnapshotsClient) RevokeAccess(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) {
resultChan := make(chan OperationStatusResponse, 1)
errChan := make(chan error, 1)
go func() {
var err error
var result OperationStatusResponse
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.RevokeAccessPreparer(resourceGroupName, snapshotName, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request")
return
}
resp, err := client.RevokeAccessSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", resp, "Failure sending request")
return
}
result, err = client.RevokeAccessResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// RevokeAccessPreparer prepares the RevokeAccess request.
func (client SnapshotsClient) RevokeAccessPreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// RevokeAccessSender sends the RevokeAccess request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Update updates (patches) a snapshot. This method may poll for completion.
// Polling can be canceled by passing the cancel channel argument. The channel
// will be used to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. snapshotName is the
// name of the snapshot within the given subscription and resource group.
// snapshot is the snapshot object supplied in the body of the Patch Snapshot
// operation.
func (client SnapshotsClient) Update(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) {
resultChan := make(chan Snapshot, 1)
errChan := make(chan error, 1)
go func() {
var err error
var result Snapshot
defer func() {
resultChan <- result
errChan <- err
close(resultChan)
close(errChan)
}()
req, err := client.UpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// UpdatePreparer prepares the Update request.
func (client SnapshotsClient) UpdatePreparer(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-04-30-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithJSON(snapshot),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) UpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
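// updateSnapshotTagsSketch is an illustrative sketch, not part of the
// generated client: it shows the PATCH-style Update call above being used to
// replace only the snapshot's tags, leaving its properties untouched. The
// resource names and tag values are hypothetical.
func updateSnapshotTagsSketch(client SnapshotsClient, resourceGroup, snapshotName string) (Snapshot, error) {
	cancel := make(chan struct{})
	defer close(cancel)

	owner := "storage-team" // hypothetical tag value
	tags := map[string]*string{"owner": &owner}
	update := SnapshotUpdate{Tags: &tags}

	resultChan, errChan := client.Update(resourceGroup, snapshotName, update, cancel)
	return <-resultChan, <-errChan
}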


@ -0,0 +1,29 @@
package disk
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
return "Azure-SDK-For-Go/v10.0.2-beta arm-disk/2016-04-30-preview"
}
// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
return "v10.0.2-beta"
}