mirror of https://github.com/k3s-io/k3s
Merge pull request #75742 from rhockenbury/automated-cherry-pick-of-#75515-upstream-release-1.14
Automated cherry pick of #75515: godeps: update vmware/govmomi to v0.20 releasepull/564/head
commit
b5e1ea4a5e
|
@ -3319,168 +3319,168 @@
|
|||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/find",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/list",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/lookup",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/lookup/methods",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/lookup/simulator",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/lookup/types",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/nfc",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/object",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/pbm",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/pbm/methods",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/pbm/types",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/property",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/session",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/simulator",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/simulator/esx",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/simulator/vpx",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/sts",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/sts/internal",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/sts/simulator",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/task",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vapi/internal",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vapi/rest",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vapi/simulator",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vapi/tags",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/debug",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/methods",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/mo",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/progress",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/soap",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/types",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/govmomi/vim25/xml",
|
||||
"Comment": "v0.18.0-48-g22f74650cf39ba",
|
||||
"Rev": "22f74650cf39ba4649fba45e770df0f44df6f758"
|
||||
"Comment": "v0.20.0",
|
||||
"Rev": "bdf05b6cab86b1e9f40ee80a4d2cb07a0c25ef78"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vmware/photon-controller-go-sdk/SSPI",
|
||||
|
|
|
@ -86,34 +86,27 @@ func (connection *VSphereConnection) Connect(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// login calls SessionManager.LoginByToken if certificate and private key are configured,
|
||||
// otherwise calls SessionManager.Login with user and password.
|
||||
func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Client) error {
|
||||
m := session.NewManager(client)
|
||||
connection.credentialsLock.Lock()
|
||||
defer connection.credentialsLock.Unlock()
|
||||
|
||||
// Signer returns an sts.Signer for use with SAML token auth if connection is configured for such.
|
||||
// Returns nil if username/password auth is configured for the connection.
|
||||
func (connection *VSphereConnection) Signer(ctx context.Context, client *vim25.Client) (*sts.Signer, error) {
|
||||
// TODO: Add separate fields for certificate and private-key.
|
||||
// For now we can leave the config structs and validation as-is and
|
||||
// decide to use LoginByToken if the username value is PEM encoded.
|
||||
b, _ := pem.Decode([]byte(connection.Username))
|
||||
if b == nil {
|
||||
klog.V(3).Infof("SessionManager.Login with username '%s'", connection.Username)
|
||||
return m.Login(ctx, neturl.UserPassword(connection.Username, connection.Password))
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
klog.V(3).Infof("SessionManager.LoginByToken with certificate '%s'", connection.Username)
|
||||
|
||||
cert, err := tls.X509KeyPair([]byte(connection.Username), []byte(connection.Password))
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to load X509 key pair. err: %+v", err)
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tokens, err := sts.NewClient(ctx, client)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to create STS client. err: %+v", err)
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := sts.TokenRequest{
|
||||
|
@ -123,9 +116,31 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl
|
|||
signer, err := tokens.Issue(ctx, req)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to issue SAML token. err: %+v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
// login calls SessionManager.LoginByToken if certificate and private key are configured,
|
||||
// otherwise calls SessionManager.Login with user and password.
|
||||
func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Client) error {
|
||||
m := session.NewManager(client)
|
||||
connection.credentialsLock.Lock()
|
||||
defer connection.credentialsLock.Unlock()
|
||||
|
||||
signer, err := connection.Signer(ctx, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if signer == nil {
|
||||
klog.V(3).Infof("SessionManager.Login with username %q", connection.Username)
|
||||
return m.Login(ctx, neturl.UserPassword(connection.Username, connection.Password))
|
||||
}
|
||||
|
||||
klog.V(3).Infof("SessionManager.LoginByToken with certificate %q", connection.Username)
|
||||
|
||||
header := soap.Header{Security: signer}
|
||||
|
||||
return m.LoginByToken(client.WithHeader(ctx, header))
|
||||
|
|
|
@ -38,7 +38,7 @@ import (
|
|||
"github.com/vmware/govmomi/vapi/tags"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
vmwaretypes "github.com/vmware/govmomi/vim25/types"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
k8stypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/informers"
|
||||
|
@ -1432,10 +1432,20 @@ func (vs *VSphere) NodeManager() (nodeManager *NodeManager) {
|
|||
|
||||
func withTagsClient(ctx context.Context, connection *vclib.VSphereConnection, f func(c *rest.Client) error) error {
|
||||
c := rest.NewClient(connection.Client)
|
||||
user := url.UserPassword(connection.Username, connection.Password)
|
||||
if err := c.Login(ctx, user); err != nil {
|
||||
signer, err := connection.Signer(ctx, connection.Client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if signer == nil {
|
||||
user := url.UserPassword(connection.Username, connection.Password)
|
||||
err = c.Login(ctx, user)
|
||||
} else {
|
||||
err = c.LoginByToken(c.WithSigner(ctx, signer))
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := c.Logout(ctx); err != nil {
|
||||
klog.Errorf("failed to logout: %v", err)
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
@ -40,7 +41,7 @@ import (
|
|||
"github.com/vmware/govmomi/vapi/tags"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
vmwaretypes "github.com/vmware/govmomi/vim25/types"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
|
@ -343,6 +344,10 @@ func TestZones(t *testing.T) {
|
|||
cfg, cleanup := configFromSim()
|
||||
defer cleanup()
|
||||
|
||||
// Configure for SAML token auth
|
||||
cfg.Global.User = localhostCert
|
||||
cfg.Global.Password = localhostKey
|
||||
|
||||
// Create vSphere configuration object
|
||||
vs, err := newControllerNode(cfg)
|
||||
if err != nil {
|
||||
|
@ -381,6 +386,13 @@ func TestZones(t *testing.T) {
|
|||
|
||||
// Tag manager instance
|
||||
m := tags.NewManager(rest.NewClient(vsi.conn.Client))
|
||||
signer, err := vsi.conn.Signer(ctx, vsi.conn.Client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err = m.LoginByToken(m.WithSigner(ctx, signer)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a region category
|
||||
regionID, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Region})
|
||||
|
@ -560,6 +572,10 @@ func TestGetZoneToHosts(t *testing.T) {
|
|||
|
||||
// Tag manager instance
|
||||
m := tags.NewManager(rest.NewClient(vsi.conn.Client))
|
||||
user := url.UserPassword(vsi.conn.Username, vsi.conn.Password)
|
||||
if err = m.Login(ctx, user); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a region category
|
||||
regionCat, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Region})
|
||||
|
|
|
@ -1,2 +1,3 @@
|
|||
secrets.yml
|
||||
dist/
|
||||
.idea/
|
||||
|
|
|
@ -1,6 +1,11 @@
|
|||
Amanda H. L. de Andrade <amanda.andrade@serpro.gov.br> amandahla <amanda.andrade@serpro.gov.br>
|
||||
Amit Bathla <abathla@.vmware.com> <abathla@promb-1s-dhcp216.eng.vmware.com>
|
||||
Andrew Kutz <akutz@vmware.com> akutz <akutz@vmware.com>
|
||||
Andrew Kutz <akutz@vmware.com> <sakutz@gmail.com>
|
||||
Andrew Kutz <akutz@vmware.com> Andrew Kutz <101085+akutz@users.noreply.github.com>
|
||||
Bruce Downs <bruceadowns@gmail.com> <bdowns@vmware.com>
|
||||
Bruce Downs <bruceadowns@gmail.com> <bruce.downs@jivesoftware.com>
|
||||
Bruce Downs <bruceadowns@gmail.com> <bruce.downs@autodesk.com>
|
||||
Clint Greenwood <cgreenwood@vmware.com> <clint.greenwood@gmail.com>
|
||||
Cédric Blomart <cblomart@gmail.com> <cedric.blomart@minfin.fed.be>
|
||||
Cédric Blomart <cblomart@gmail.com> cedric <cblomart@gmail.com>
|
||||
|
@ -18,3 +23,6 @@ Anfernee Yongkun Gui <agui@vmware.com> <anfernee.gui@gmail.com>
|
|||
Anfernee Yongkun Gui <agui@vmware.com> Yongkun Anfernee Gui <agui@vmware.com>
|
||||
Zach Tucker <ztucker@vmware.com> <jzt@users.noreply.github.com>
|
||||
Zee Yang <zeey@vmware.com> <zee.yang@gmail.com>
|
||||
Jiatong Wang <wjiatong@vmware.com> jiatongw <wjiatong@vmware.com>
|
||||
Uwe Bessle <Uwe.Bessle@iteratec.de> Uwe Bessle <u.bessle.extern@eos-ts.com>
|
||||
Uwe Bessle <Uwe.Bessle@iteratec.de> Uwe Bessle <uwe.bessle@web.de>
|
||||
|
|
|
@ -1,28 +1,95 @@
|
|||
sudo: required
|
||||
# Use the newer Travis-CI build templates based on the
|
||||
# Debian Linux distribution "Trusty" release.
|
||||
os: linux
|
||||
dist: trusty
|
||||
|
||||
language: go
|
||||
# Disable sudo for all builds by default. This ensures all jobs use
|
||||
# Travis-CI's containerized build environment unless specified otherwise.
|
||||
# The container builds have *much* shorter queue times than the VM-based
|
||||
# build environment on which the sudo builds depend.
|
||||
sudo: false
|
||||
services: false
|
||||
|
||||
go:
|
||||
- '1.10'
|
||||
# Set the version of Go.
|
||||
language: go
|
||||
go: 1.11
|
||||
|
||||
# Always set the project's Go import path to ensure that forked
|
||||
# builds get cloned to the correct location.
|
||||
go_import_path: github.com/vmware/govmomi
|
||||
|
||||
before_install:
|
||||
- sudo apt-get -qq update
|
||||
- sudo apt-get install -y xmlstarlet
|
||||
- make vendor
|
||||
# Ensure all the jobs know where the temp directory is.
|
||||
env:
|
||||
global: TMPDIR=/tmp
|
||||
|
||||
script:
|
||||
- make check test
|
||||
- GOOS=windows make install
|
||||
jobs:
|
||||
include:
|
||||
|
||||
after_success:
|
||||
- test -n "$TRAVIS_TAG" && docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD"
|
||||
# The "lint" stage runs the various linters against the project.
|
||||
- &lint-stage
|
||||
stage: lint
|
||||
env: LINTER=govet
|
||||
install: true
|
||||
script: make "${LINTER}"
|
||||
|
||||
deploy:
|
||||
- provider: script
|
||||
skip_cleanup: true
|
||||
script: curl -sL http://git.io/goreleaser | bash
|
||||
on:
|
||||
tags: true
|
||||
condition: $TRAVIS_OS_NAME = linux
|
||||
go: '1.10'
|
||||
- <<: *lint-stage
|
||||
env: LINTER=goimports
|
||||
|
||||
# The "build" stage verifies the program can be built against the
|
||||
# various GOOS and GOARCH combinations found in the Go releaser
|
||||
# config file, ".goreleaser.yml".
|
||||
- &build-stage
|
||||
stage: build
|
||||
env: GOOS=linux GOARCH=amd64
|
||||
install: true
|
||||
script: make install
|
||||
|
||||
- <<: *build-stage
|
||||
env: GOOS=linux GOARCH=386
|
||||
|
||||
- <<: *build-stage
|
||||
env: GOOS=darwin GOARCH=amd64
|
||||
- <<: *build-stage
|
||||
env: GOOS=darwin GOARCH=386
|
||||
|
||||
- <<: *build-stage
|
||||
env: GOOS=freebsd GOARCH=amd64
|
||||
- <<: *build-stage
|
||||
env: GOOS=freebsd GOARCH=386
|
||||
|
||||
- <<: *build-stage
|
||||
env: GOOS=windows GOARCH=amd64
|
||||
- <<: *build-stage
|
||||
env: GOOS=windows GOARCH=386
|
||||
|
||||
# The test stage executes the test target.
|
||||
- stage: test
|
||||
install: true
|
||||
script: make test
|
||||
|
||||
# The deploy stage deploys the build artifacts using goreleaser.
|
||||
#
|
||||
# This stage will only be activated when there is an annotated tag present
|
||||
# or when the text "/ci-deploy" is present in the commit message. However,
|
||||
# the "deploy" phase of the build will still only be executed on non-PR
|
||||
# builds as that restriction is baked into Travis-CI.
|
||||
#
|
||||
# Finally, this stage requires the Travis-CI VM infrastructure in order to
|
||||
# leverage Docker. This will increase the amount of time the jobs sit
|
||||
# in the queue, waiting to be built. However, it's a necessity as Travis-CI
|
||||
# only allows the use of Docker with VM builds.
|
||||
- stage: deploy
|
||||
if: tag IS present OR commit_message =~ /\/ci-deploy/
|
||||
sudo: required
|
||||
services: docker
|
||||
install: true
|
||||
script: make install
|
||||
after_success: docker login -u="${DOCKER_USERNAME}" -p="${DOCKER_PASSWORD}"
|
||||
deploy:
|
||||
- provider: script
|
||||
skip_cleanup: true
|
||||
script: curl -sL http://git.io/goreleaser | bash
|
||||
addons:
|
||||
apt:
|
||||
update: true
|
||||
packages: xmlstarlet
|
||||
|
|
|
@ -1,6 +1,18 @@
|
|||
# changelog
|
||||
|
||||
### unreleased
|
||||
### 0.20.0 (2018-02-06)
|
||||
|
||||
* Add vslm package for managing First Class Disks
|
||||
|
||||
* Add LoginByToken to session KeepAliveHandler
|
||||
|
||||
### 0.19.0 (2018-09-30)
|
||||
|
||||
* New vapi/rest and and vapi/tags packages
|
||||
|
||||
* Allowing the use of STS for exchanging tokens
|
||||
|
||||
* Add object.VirtualMachine.UUID method
|
||||
|
||||
* SetRootCAs on the soap.Client returns an error for invalid certificates
|
||||
|
||||
|
|
|
@ -6,16 +6,18 @@
|
|||
Abhijeet Kasurde <akasurde@redhat.com>
|
||||
abrarshivani <abrarshivani@users.noreply.github.com>
|
||||
Adam Shannon <adamkshannon@gmail.com>
|
||||
akutz <sakutz@gmail.com>
|
||||
Alessandro Cortiana <alessandro.cortiana@gmail.com>
|
||||
Alex Bozhenko <alexbozhenko@fb.com>
|
||||
Alex Ellis (VMware) <alexellis2@gmail.com>
|
||||
Alvaro Miranda <kikitux@gmail.com>
|
||||
amandahla <amanda.andrade@serpro.gov.br>
|
||||
Amanda H. L. de Andrade <amanda.andrade@serpro.gov.br>
|
||||
Amit Bathla <abathla@.vmware.com>
|
||||
amit bezalel <amit.bezalel@hpe.com>
|
||||
Andrew <AndrewDi@users.noreply.github.com>
|
||||
Andrew Chin <andrew@andrewtchin.com>
|
||||
Andrew Kutz <akutz@vmware.com>
|
||||
Anfernee Yongkun Gui <agui@vmware.com>
|
||||
angystardust <angystardust@users.noreply.github.com>
|
||||
aniketGslab <aniket.shinde@gslab.com>
|
||||
Arran Walker <arran.walker@zopa.com>
|
||||
Aryeh Weinreb <aryehweinreb@gmail.com>
|
||||
|
@ -29,19 +31,23 @@ Cédric Blomart <cblomart@gmail.com>
|
|||
Chris Marchesi <chrism@vancluevertech.com>
|
||||
Christian Höltje <docwhat@gerf.org>
|
||||
Clint Greenwood <cgreenwood@vmware.com>
|
||||
CuiHaozhi <cuihaozhi@chinacloud.com.cn>
|
||||
Danny Lockard <danny.lockard@banno.com>
|
||||
Dave Tucker <dave@dtucker.co.uk>
|
||||
Davide Agnello <dagnello@hp.com>
|
||||
David Stark <dave@davidstark.name>
|
||||
Davinder Kumar <davinderk@vmware.com>
|
||||
Deric Crago <deric.crago@gmail.com>
|
||||
Doug MacEachern <dougm@vmware.com>
|
||||
Eloy Coto <eloy.coto@gmail.com>
|
||||
Eric Gray <egray@vmware.com>
|
||||
Eric Yutao <eric.yutao@gmail.com>
|
||||
Erik Hollensbe <github@hollensbe.org>
|
||||
Ethan Kaley <ethan.kaley@emc.com>
|
||||
Fabio Rapposelli <fabio@vmware.com>
|
||||
Faiyaz Ahmed <ahmedf@vmware.com>
|
||||
forkbomber <forkbomber@users.noreply.github.com>
|
||||
freebsdly <qinhuajun@outlook.com>
|
||||
Gavin Gray <gavin@infinio.com>
|
||||
Gavrie Philipson <gavrie.philipson@elastifile.com>
|
||||
George Hicken <ghicken@vmware.com>
|
||||
|
@ -51,33 +57,52 @@ Hasan Mahmood <mahmoodh@vmware.com>
|
|||
Henrik Hodne <henrik@travis-ci.com>
|
||||
Isaac Rodman <isaac@eyz.us>
|
||||
Ivan Porto Carrero <icarrero@vmware.com>
|
||||
James King <james.king@emc.com>
|
||||
Jason Kincl <jkincl@gmail.com>
|
||||
Jeremy Canady <jcanady@jackhenry.com>
|
||||
jeremy-clerc <jeremy@clerc.io>
|
||||
Jiatong Wang <wjiatong@vmware.com>
|
||||
João Pereira <joaodrp@gmail.com>
|
||||
Jonas Ausevicius <jonas.ausevicius@virtustream.com>
|
||||
Jorge Sevilla <jorge.sevilla@rstor.io>
|
||||
kayrus <kay.diam@gmail.com>
|
||||
Kevin George <georgek@vmware.com>
|
||||
leslie-qiwa <leslie.qiwa@gmail.com>
|
||||
Louie Jiang <jiangl@vmware.com>
|
||||
maplain <fangyuanl@vmware.com>
|
||||
Marc Carmier <mcarmier@gmail.com>
|
||||
Maria Ntalla <maria.ntalla@gmail.com>
|
||||
Marin Atanasov Nikolov <mnikolov@vmware.com>
|
||||
Matt Clay <matt@mystile.com>
|
||||
Matthew Cosgrove <matthew.cosgrove@dell.com>
|
||||
Matt Moriarity <matt@mattmoriarity.com>
|
||||
Mevan Samaratunga <mevansam@gmail.com>
|
||||
Michal Jankowski <mjankowski@vmware.com>
|
||||
mingwei <mingwei@smartx.com>
|
||||
Nicolas Lamirault <nicolas.lamirault@gmail.com>
|
||||
Omar Kohl <omarkohl@gmail.com>
|
||||
Parham Alvani <parham.alvani@gmail.com>
|
||||
Pierre Gronlier <pierre.gronlier@corp.ovh.com>
|
||||
Pieter Noordhuis <pnoordhuis@vmware.com>
|
||||
prydin <prydin@vmware.com>
|
||||
Rowan Jacobs <rojacobs@pivotal.io>
|
||||
runner.mei <runner.mei@gmail.com>
|
||||
S.Çağlar Onur <conur@vmware.com>
|
||||
Sergey Ignatov <sergey.ignatov@jetbrains.com>
|
||||
Steve Purcell <steve@sanityinc.com>
|
||||
Takaaki Furukawa <takaaki.frkw@gmail.com>
|
||||
Tamas Eger <tamas.eger@bitrise.io>
|
||||
tanishi <tanishi503@gmail.com>
|
||||
Ted Zlatanov <tzz@lifelogs.com>
|
||||
Thibaut Ackermann <thibaut.ackermann@alcatel-lucent.com>
|
||||
Trevor Dawe <trevor.dawe@gmail.com>
|
||||
Uwe Bessle <Uwe.Bessle@iteratec.de>
|
||||
Vadim Egorov <vegorov@vmware.com>
|
||||
Vikram Krishnamurthy <vikramkrishnamu@vmware.com>
|
||||
Volodymyr Bobyr <pupsua@gmail.com>
|
||||
Witold Krecicki <wpk@culm.net>
|
||||
Yang Yang <yangy@vmware.com>
|
||||
Yuya Kusakabe <yuya.kusakabe@gmail.com>
|
||||
Zacharias Taubert <zacharias.taubert@gmail.com>
|
||||
Zach Tucker <ztucker@vmware.com>
|
||||
Zee Yang <zeey@vmware.com>
|
||||
|
|
|
@ -3,42 +3,58 @@
|
|||
|
||||
[[projects]]
|
||||
branch = "improvements"
|
||||
digest = "1:b183578c34fabccaf65f1a57d2efeec2086abdce1446978d69ab3a0016cb750c"
|
||||
name = "github.com/davecgh/go-xdr"
|
||||
packages = ["xdr2"]
|
||||
pruneopts = "NUT"
|
||||
revision = "4930550ba2e22f87187498acfd78348b15f4e7a8"
|
||||
source = "https://github.com/rasky/go-xdr"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1ab18cf8c2084968d6dca0dd46fbda9efba08664ecd7957b63c7ca57bb2455df"
|
||||
name = "github.com/google/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "6a5e28554805e78ea6141142aba763936c4761c0"
|
||||
|
||||
[[projects]]
|
||||
branch = "govmomi"
|
||||
digest = "1:f49ed6cb2129e9a3ce9dde5037cb243b5849c0ec0c7973b9d1e987872d8b8cc6"
|
||||
name = "github.com/kr/pretty"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "2ee9d7453c02ef7fa518a83ae23644eb8872186a"
|
||||
source = "https://github.com/dougm/pretty"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c3a7836b5904db0f8b609595b619916a6831cb35b8b714aec39f96d00c6155d8"
|
||||
name = "github.com/kr/text"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "7cafcd837844e784b526369c9bce262804aebc60"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:4bea31865971675c482ed875caeabe7d2182dcb47d52900b7da5236d66dc9970"
|
||||
name = "github.com/vmware/vmw-guestinfo"
|
||||
packages = [
|
||||
"bdoor",
|
||||
"message",
|
||||
"vmcheck"
|
||||
"vmcheck",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "25eff159a728be87e103a0b8045e08273f4dbec4"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "376638fa6c0621cbd980caf8fc53494d880886f100663da8de47ecb6e596e439"
|
||||
input-imports = [
|
||||
"github.com/davecgh/go-xdr/xdr2",
|
||||
"github.com/google/uuid",
|
||||
"github.com/kr/pretty",
|
||||
"github.com/vmware/vmw-guestinfo/message",
|
||||
"github.com/vmware/vmw-guestinfo/vmcheck",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
|
@ -6,7 +6,7 @@ check: goimports govet
|
|||
|
||||
goimports:
|
||||
@echo checking go imports...
|
||||
@go get golang.org/x/tools/cmd/goimports
|
||||
@command -v goimports >/dev/null 2>&1 || go get golang.org/x/tools/cmd/goimports
|
||||
@! goimports -d . 2>&1 | egrep -v '^$$'
|
||||
|
||||
govet:
|
||||
|
@ -14,8 +14,8 @@ govet:
|
|||
@go tool vet -structtags=false -methods=false $$(find . -mindepth 1 -maxdepth 1 -type d -not -name vendor)
|
||||
|
||||
install:
|
||||
go install -v github.com/vmware/govmomi/govc
|
||||
go install -v github.com/vmware/govmomi/vcsim
|
||||
$(MAKE) -C govc install
|
||||
$(MAKE) -C vcsim install
|
||||
|
||||
go-test:
|
||||
GORACE=history_size=5 go test -timeout 5m -count 1 -race -v $(TEST_OPTS) ./...
|
||||
|
|
|
@ -59,6 +59,10 @@ Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes.
|
|||
|
||||
* [Kubernetes](https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/vsphere)
|
||||
|
||||
* [Kubernetes Cloud Provider](https://github.com/kubernetes/cloud-provider-vsphere)
|
||||
|
||||
* [Kubernetes Cluster API](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere)
|
||||
|
||||
* [Kubernetes kops](https://github.com/kubernetes/kops/tree/master/upup/pkg/fi/cloudup/vsphere)
|
||||
|
||||
* [Terraform](https://github.com/terraform-providers/terraform-provider-vsphere)
|
||||
|
@ -75,6 +79,10 @@ Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes.
|
|||
|
||||
* [Libretto](https://github.com/apcera/libretto/tree/master/virtualmachine/vsphere)
|
||||
|
||||
* [Telegraf](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/vsphere)
|
||||
|
||||
* [Open Storage](https://github.com/libopenstorage/openstorage/tree/master/pkg/storageops/vsphere)
|
||||
|
||||
## Related projects
|
||||
|
||||
* [rbvmomi](https://github.com/vmware/rbvmomi)
|
||||
|
|
|
@ -29,7 +29,7 @@ var content = types.LookupServiceContent{
|
|||
LookupService: vim.ManagedObjectReference{Type: "LookupLookupService", Value: "lookupService"},
|
||||
ServiceRegistration: &vim.ManagedObjectReference{Type: "LookupServiceRegistration", Value: "ServiceRegistration"},
|
||||
DeploymentInformationService: vim.ManagedObjectReference{Type: "LookupDeploymentInformationService", Value: "deploymentInformationService"},
|
||||
L10n: vim.ManagedObjectReference{Type: "LookupL10n", Value: "l10n"},
|
||||
L10n: vim.ManagedObjectReference{Type: "LookupL10n", Value: "l10n"},
|
||||
}
|
||||
|
||||
func New() *simulator.Registry {
|
||||
|
@ -43,7 +43,7 @@ func New() *simulator.Registry {
|
|||
})
|
||||
r.Put(&ServiceRegistration{
|
||||
ManagedObjectReference: *content.ServiceRegistration,
|
||||
Info: registrationInfo(),
|
||||
Info: registrationInfo(),
|
||||
})
|
||||
|
||||
return r
|
||||
|
|
|
@ -57,10 +57,10 @@ func (o FileItem) File() types.OvfFile {
|
|||
}
|
||||
|
||||
type LeaseUpdater struct {
|
||||
lease *Lease
|
||||
pos int64 // Number of bytes (keep first to ensure 64 bit aligment)
|
||||
total int64 // Total number of bytes (keep first to ensure 64 bit aligment)
|
||||
|
||||
pos int64 // Number of bytes
|
||||
total int64 // Total number of bytes
|
||||
lease *Lease
|
||||
|
||||
done chan struct{} // When lease updater should stop
|
||||
|
||||
|
|
|
@ -100,6 +100,7 @@ func (c Common) ObjectName(ctx context.Context) (string, error) {
|
|||
return n.Name, nil
|
||||
}
|
||||
|
||||
// Properties is a wrapper for property.DefaultCollector().RetrieveOne()
|
||||
func (c Common) Properties(ctx context.Context, r types.ManagedObjectReference, ps []string, dst interface{}) error {
|
||||
return property.DefaultCollector(c.c).RetrieveOne(ctx, r, ps, dst)
|
||||
}
|
||||
|
@ -130,3 +131,14 @@ func (c Common) Rename(ctx context.Context, name string) (*Task, error) {
|
|||
|
||||
return NewTask(c.c, res.Returnval), nil
|
||||
}
|
||||
|
||||
func (c Common) SetCustomValue(ctx context.Context, key string, value string) error {
|
||||
req := types.SetCustomValue{
|
||||
This: c.Reference(),
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
|
||||
_, err := methods.SetCustomValue(ctx, c.c, &req)
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -17,17 +17,16 @@ limitations under the License.
|
|||
package object
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/vmware/govmomi/property"
|
||||
"github.com/vmware/govmomi/session"
|
||||
"github.com/vmware/govmomi/vim25"
|
||||
|
|
|
@ -85,8 +85,8 @@ func (f FileManager) DeleteDatastoreFile(ctx context.Context, name string, dc *D
|
|||
// MakeDirectory creates a folder using the specified name.
|
||||
func (f FileManager) MakeDirectory(ctx context.Context, name string, dc *Datacenter, createParentDirectories bool) error {
|
||||
req := types.MakeDirectory{
|
||||
This: f.Reference(),
|
||||
Name: name,
|
||||
This: f.Reference(),
|
||||
Name: name,
|
||||
CreateParentDirectories: types.NewBool(createParentDirectories),
|
||||
}
|
||||
|
||||
|
|
|
@ -66,7 +66,7 @@ func (m HostCertificateManager) CertificateInfo(ctx context.Context) (*HostCerti
|
|||
// Use InstallServerCertificate to import this certificate.
|
||||
func (m HostCertificateManager) GenerateCertificateSigningRequest(ctx context.Context, useIPAddressAsCommonName bool) (string, error) {
|
||||
req := types.GenerateCertificateSigningRequest{
|
||||
This: m.Reference(),
|
||||
This: m.Reference(),
|
||||
UseIpAddressAsCommonName: useIPAddressAsCommonName,
|
||||
}
|
||||
|
||||
|
|
|
@ -82,6 +82,20 @@ func (v VirtualMachine) PowerOff(ctx context.Context) (*Task, error) {
|
|||
return NewTask(v.c, res.Returnval), nil
|
||||
}
|
||||
|
||||
func (v VirtualMachine) PutUsbScanCodes(ctx context.Context, spec types.UsbScanCodeSpec) (int32, error) {
|
||||
req := types.PutUsbScanCodes{
|
||||
This: v.Reference(),
|
||||
Spec: spec,
|
||||
}
|
||||
|
||||
res, err := methods.PutUsbScanCodes(ctx, v.c, &req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.Returnval, nil
|
||||
}
|
||||
|
||||
func (v VirtualMachine) Reset(ctx context.Context) (*Task, error) {
|
||||
req := types.ResetVM_Task{
|
||||
This: v.Reference(),
|
||||
|
@ -198,6 +212,15 @@ func (v VirtualMachine) Reconfigure(ctx context.Context, config types.VirtualMac
|
|||
return NewTask(v.c, res.Returnval), nil
|
||||
}
|
||||
|
||||
func (v VirtualMachine) RefreshStorageInfo(ctx context.Context) error {
|
||||
req := types.RefreshStorageInfo{
|
||||
This: v.Reference(),
|
||||
}
|
||||
|
||||
_, err := methods.RefreshStorageInfo(ctx, v.c, &req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) {
|
||||
var ip string
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ import (
|
|||
|
||||
const (
|
||||
Namespace = "pbm"
|
||||
Path = "/pbm" + vim25.Path
|
||||
Path = "/pbm"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
ifneq (,$(strip $(GOOS)))
|
||||
ifeq (,$(strip $(GOARCH)))
|
||||
GOARCH := $(shell go env | grep GOARCH | awk -F= '{print $$2}' | tr -d '"')
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq (,$(strip $(GOARCH)))
|
||||
ifeq (,$(strip $(GOOS)))
|
||||
GOOS := $(shell go env | grep GOOS | awk -F= '{print $$2}' | tr -d '"')
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq (2,$(words $(GOOS) $(GOARCH)))
|
||||
PROGRAM := $(PROGRAM)_$(GOOS)_$(GOARCH)
|
||||
endif
|
||||
|
||||
ifeq (windows,$(GOOS))
|
||||
PROGRAM := $(PROGRAM).exe
|
||||
endif
|
||||
|
||||
all: $(PROGRAM)
|
||||
|
||||
TAGS += netgo
|
||||
ifeq (,$(strip $(findstring -w,$(LDFLAGS))))
|
||||
LDFLAGS += -w
|
||||
endif
|
||||
BUILD_ARGS := -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -v
|
||||
|
||||
$(PROGRAM):
|
||||
CGO_ENABLED=0 go build -a $(BUILD_ARGS) -o $@
|
||||
|
||||
install:
|
||||
CGO_ENABLED=0 go install $(BUILD_ARGS)
|
||||
|
||||
ifneq (,$(strip $(BUILD_OS)))
|
||||
ifneq (,$(strip $(BUILD_ARCH)))
|
||||
GOOS_GOARCH_TARGETS := $(foreach a,$(BUILD_ARCH),$(patsubst %,%_$a,$(BUILD_OS)))
|
||||
XBUILD := $(addprefix $(PROGRAM)_,$(GOOS_GOARCH_TARGETS))
|
||||
$(XBUILD):
|
||||
GOOS=$(word 2,$(subst _, ,$@)) GOARCH=$(word 3,$(subst _, ,$@)) $(MAKE) --output-sync=target
|
||||
build-all: $(XBUILD)
|
||||
endif
|
||||
endif
|
||||
|
||||
clean:
|
||||
@rm -f $(PROGRAM) $(XBUILD)
|
||||
|
||||
.PHONY: build-all install clean
|
|
@ -126,30 +126,30 @@ func (p *Collector) RetrieveProperties(ctx context.Context, req types.RetrievePr
|
|||
// must be a pointer to a []interface{}, which is populated with the instances
|
||||
// of the specified managed objects, with the relevant properties filled in. If
|
||||
// the properties slice is nil, all properties are loaded.
|
||||
// Note that pointer types are optional fields that may be left as a nil value.
|
||||
// The caller should check such fields for a nil value before dereferencing.
|
||||
func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}) error {
|
||||
if len(objs) == 0 {
|
||||
return errors.New("object references is empty")
|
||||
}
|
||||
|
||||
var propSpec *types.PropertySpec
|
||||
kinds := make(map[string]bool)
|
||||
|
||||
var propSet []types.PropertySpec
|
||||
var objectSet []types.ObjectSpec
|
||||
|
||||
for _, obj := range objs {
|
||||
// Ensure that all object reference types are the same
|
||||
if propSpec == nil {
|
||||
propSpec = &types.PropertySpec{
|
||||
if _, ok := kinds[obj.Type]; !ok {
|
||||
spec := types.PropertySpec{
|
||||
Type: obj.Type,
|
||||
}
|
||||
|
||||
if ps == nil {
|
||||
propSpec.All = types.NewBool(true)
|
||||
spec.All = types.NewBool(true)
|
||||
} else {
|
||||
propSpec.PathSet = ps
|
||||
}
|
||||
} else {
|
||||
if obj.Type != propSpec.Type {
|
||||
return errors.New("object references must have the same type")
|
||||
spec.PathSet = ps
|
||||
}
|
||||
propSet = append(propSet, spec)
|
||||
kinds[obj.Type] = true
|
||||
}
|
||||
|
||||
objectSpec := types.ObjectSpec{
|
||||
|
@ -164,7 +164,7 @@ func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectRefe
|
|||
SpecSet: []types.PropertyFilterSpec{
|
||||
{
|
||||
ObjectSet: objectSet,
|
||||
PropSet: []types.PropertySpec{*propSpec},
|
||||
PropSet: propSet,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -204,7 +204,7 @@ func (p *Collector) RetrieveWithFilter(ctx context.Context, objs []types.Managed
|
|||
return p.Retrieve(ctx, objs, ps, dst)
|
||||
}
|
||||
|
||||
// RetrieveOne calls Retrieve with a single managed object reference.
|
||||
// RetrieveOne calls Retrieve with a single managed object reference via Collector.Retrieve().
|
||||
func (p *Collector) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, ps []string, dst interface{}) error {
|
||||
var objs = []types.ManagedObjectReference{obj}
|
||||
return p.Retrieve(ctx, objs, ps, dst)
|
||||
|
|
|
@ -114,10 +114,9 @@ func (k *keepAlive) RoundTrip(ctx context.Context, req, res soap.HasFault) error
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start ticker on login, stop ticker on logout.
|
||||
switch req.(type) {
|
||||
case *methods.LoginBody, *methods.LoginExtensionByCertificateBody:
|
||||
case *methods.LoginBody, *methods.LoginExtensionByCertificateBody, *methods.LoginByTokenBody:
|
||||
k.start()
|
||||
case *methods.LogoutBody:
|
||||
k.stop()
|
||||
|
|
|
@ -5,12 +5,14 @@ go_library(
|
|||
srcs = [
|
||||
"authorization_manager.go",
|
||||
"cluster_compute_resource.go",
|
||||
"container.go",
|
||||
"custom_fields_manager.go",
|
||||
"datacenter.go",
|
||||
"datastore.go",
|
||||
"doc.go",
|
||||
"dvs.go",
|
||||
"entity.go",
|
||||
"environment_browser.go",
|
||||
"event_manager.go",
|
||||
"file_manager.go",
|
||||
"folder.go",
|
||||
|
@ -24,6 +26,7 @@ go_library(
|
|||
"ip_pool_manager.go",
|
||||
"license_manager.go",
|
||||
"model.go",
|
||||
"object.go",
|
||||
"option_manager.go",
|
||||
"os_unix.go",
|
||||
"os_windows.go",
|
||||
|
@ -38,12 +41,14 @@ go_library(
|
|||
"session_manager.go",
|
||||
"simulator.go",
|
||||
"snapshot.go",
|
||||
"storage_resource_manager.go",
|
||||
"task.go",
|
||||
"task_manager.go",
|
||||
"user_directory.go",
|
||||
"view_manager.go",
|
||||
"virtual_disk_manager.go",
|
||||
"virtual_machine.go",
|
||||
"vstorage_object_manager.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/github.com/vmware/govmomi/simulator",
|
||||
importpath = "github.com/vmware/govmomi/simulator",
|
||||
|
|
|
@ -33,6 +33,10 @@ type ClusterComputeResource struct {
|
|||
ruleKey int32
|
||||
}
|
||||
|
||||
func (c *ClusterComputeResource) RenameTask(req *types.Rename_Task) soap.HasFault {
|
||||
return RenameTask(c, req)
|
||||
}
|
||||
|
||||
type addHost struct {
|
||||
*ClusterComputeResource
|
||||
|
||||
|
@ -300,6 +304,7 @@ func CreateClusterComputeResource(f *Folder, name string, spec types.ClusterConf
|
|||
}
|
||||
|
||||
cluster := &ClusterComputeResource{}
|
||||
cluster.EnvironmentBrowser = newEnvironmentBrowser()
|
||||
cluster.Name = name
|
||||
cluster.Summary = &types.ClusterComputeResourceSummary{
|
||||
UsageSummary: new(types.ClusterUsageSummary),
|
||||
|
|
|
@ -0,0 +1,167 @@
|
|||
/*
|
||||
Copyright (c) 2018 VMware, Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package simulator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
// container provides methods to manage a container within a simulator VM lifecycle.
|
||||
type container struct {
|
||||
id string
|
||||
}
|
||||
|
||||
// inspect applies container network settings to vm.Guest properties.
|
||||
func (c *container) inspect(vm *VirtualMachine) error {
|
||||
if c.id == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var objects []struct {
|
||||
NetworkSettings struct {
|
||||
Gateway string
|
||||
IPAddress string
|
||||
IPPrefixLen int
|
||||
MacAddress string
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("docker", "inspect", c.id)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = json.NewDecoder(bytes.NewReader(out)).Decode(&objects); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vm.Config.Annotation = strings.Join(cmd.Args, " ")
|
||||
vm.logPrintf("%s: %s", vm.Config.Annotation, string(out))
|
||||
|
||||
for _, o := range objects {
|
||||
s := o.NetworkSettings
|
||||
if s.IPAddress == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
vm.Guest.IpAddress = s.IPAddress
|
||||
vm.Summary.Guest.IpAddress = s.IPAddress
|
||||
|
||||
if len(vm.Guest.Net) != 0 {
|
||||
net := &vm.Guest.Net[0]
|
||||
net.IpAddress = []string{s.IPAddress}
|
||||
net.MacAddress = s.MacAddress
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// start runs the container if specified by the RUN.container extraConfig property.
|
||||
func (c *container) start(vm *VirtualMachine) {
|
||||
if c.id != "" {
|
||||
start := "start"
|
||||
if vm.Runtime.PowerState == types.VirtualMachinePowerStateSuspended {
|
||||
start = "unpause"
|
||||
}
|
||||
cmd := exec.Command("docker", start, c.id)
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
log.Printf("%s %s: %s", vm.Name, cmd.Args, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var args []string
|
||||
|
||||
for _, opt := range vm.Config.ExtraConfig {
|
||||
val := opt.GetOptionValue()
|
||||
if val.Key == "RUN.container" {
|
||||
run := val.Value.(string)
|
||||
err := json.Unmarshal([]byte(run), &args)
|
||||
if err != nil {
|
||||
args = []string{run}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
args = append([]string{"run", "-d", "--name", vm.Name}, args...)
|
||||
cmd := exec.Command("docker", args...)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
log.Printf("%s %s: %s", vm.Name, cmd.Args, err)
|
||||
return
|
||||
}
|
||||
|
||||
c.id = strings.TrimSpace(string(out))
|
||||
vm.logPrintf("%s %s: %s", cmd.Path, cmd.Args, c.id)
|
||||
|
||||
if err = c.inspect(vm); err != nil {
|
||||
log.Printf("%s inspect %s: %s", vm.Name, c.id, err)
|
||||
}
|
||||
}
|
||||
|
||||
// stop the container (if any) for the given vm.
|
||||
func (c *container) stop(vm *VirtualMachine) {
|
||||
if c.id == "" {
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command("docker", "stop", c.id)
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
log.Printf("%s %s: %s", vm.Name, cmd.Args, err)
|
||||
}
|
||||
}
|
||||
|
||||
// pause the container (if any) for the given vm.
|
||||
func (c *container) pause(vm *VirtualMachine) {
|
||||
if c.id == "" {
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command("docker", "pause", c.id)
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
log.Printf("%s %s: %s", vm.Name, cmd.Args, err)
|
||||
}
|
||||
}
|
||||
|
||||
// remove the container (if any) for the given vm.
|
||||
func (c *container) remove(vm *VirtualMachine) {
|
||||
if c.id == "" {
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command("docker", "rm", "-f", c.id)
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
log.Printf("%s %s: %s", vm.Name, cmd.Args, err)
|
||||
}
|
||||
}
|
|
@ -36,7 +36,70 @@ func NewCustomFieldsManager(ref types.ManagedObjectReference) object.Reference {
|
|||
return m
|
||||
}
|
||||
|
||||
func (c *CustomFieldsManager) find(key int32) (int, *types.CustomFieldDef) {
|
||||
// Iterates through all entities of passed field type;
|
||||
// Removes found field from their custom field properties.
|
||||
func entitiesFieldRemove(field types.CustomFieldDef) {
|
||||
entities := Map.All(field.ManagedObjectType)
|
||||
for _, e := range entities {
|
||||
entity := e.Entity()
|
||||
Map.WithLock(entity, func() {
|
||||
aFields := entity.AvailableField
|
||||
for i, aField := range aFields {
|
||||
if aField.Key == field.Key {
|
||||
entity.AvailableField = append(aFields[:i], aFields[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
values := e.Entity().Value
|
||||
for i, value := range values {
|
||||
if value.(*types.CustomFieldStringValue).Key == field.Key {
|
||||
entity.Value = append(values[:i], values[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
cValues := e.Entity().CustomValue
|
||||
for i, cValue := range cValues {
|
||||
if cValue.(*types.CustomFieldStringValue).Key == field.Key {
|
||||
entity.CustomValue = append(cValues[:i], cValues[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Iterates through all entities of passed field type;
|
||||
// Renames found field in entity's AvailableField property.
|
||||
func entitiesFieldRename(field types.CustomFieldDef) {
|
||||
entities := Map.All(field.ManagedObjectType)
|
||||
for _, e := range entities {
|
||||
entity := e.Entity()
|
||||
Map.WithLock(entity, func() {
|
||||
aFields := entity.AvailableField
|
||||
for i, aField := range aFields {
|
||||
if aField.Key == field.Key {
|
||||
aFields[i].Name = field.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CustomFieldsManager) findByNameType(name, moType string) (int, *types.CustomFieldDef) {
|
||||
for i, field := range c.Field {
|
||||
if (field.ManagedObjectType == "" || field.ManagedObjectType == moType || moType == "") &&
|
||||
field.Name == name {
|
||||
return i, &c.Field[i]
|
||||
}
|
||||
}
|
||||
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (c *CustomFieldsManager) findByKey(key int32) (int, *types.CustomFieldDef) {
|
||||
for i, field := range c.Field {
|
||||
if field.Key == key {
|
||||
return i, &c.Field[i]
|
||||
|
@ -49,6 +112,15 @@ func (c *CustomFieldsManager) find(key int32) (int, *types.CustomFieldDef) {
|
|||
func (c *CustomFieldsManager) AddCustomFieldDef(req *types.AddCustomFieldDef) soap.HasFault {
|
||||
body := &methods.AddCustomFieldDefBody{}
|
||||
|
||||
_, field := c.findByNameType(req.Name, req.MoType)
|
||||
if field != nil {
|
||||
body.Fault_ = Fault("", &types.DuplicateName{
|
||||
Name: req.Name,
|
||||
Object: c.Reference(),
|
||||
})
|
||||
return body
|
||||
}
|
||||
|
||||
def := types.CustomFieldDef{
|
||||
Key: c.nextKey,
|
||||
Name: req.Name,
|
||||
|
@ -58,6 +130,14 @@ func (c *CustomFieldsManager) AddCustomFieldDef(req *types.AddCustomFieldDef) so
|
|||
FieldInstancePrivileges: req.FieldPolicy,
|
||||
}
|
||||
|
||||
entities := Map.All(req.MoType)
|
||||
for _, e := range entities {
|
||||
entity := e.Entity()
|
||||
Map.WithLock(entity, func() {
|
||||
entity.AvailableField = append(entity.AvailableField, def)
|
||||
})
|
||||
}
|
||||
|
||||
c.Field = append(c.Field, def)
|
||||
c.nextKey++
|
||||
|
||||
|
@ -70,12 +150,14 @@ func (c *CustomFieldsManager) AddCustomFieldDef(req *types.AddCustomFieldDef) so
|
|||
func (c *CustomFieldsManager) RemoveCustomFieldDef(req *types.RemoveCustomFieldDef) soap.HasFault {
|
||||
body := &methods.RemoveCustomFieldDefBody{}
|
||||
|
||||
i, field := c.find(req.Key)
|
||||
i, field := c.findByKey(req.Key)
|
||||
if field == nil {
|
||||
body.Fault_ = Fault("", &types.NotFound{})
|
||||
return body
|
||||
}
|
||||
|
||||
entitiesFieldRemove(*field)
|
||||
|
||||
c.Field = append(c.Field[:i], c.Field[i+1:]...)
|
||||
|
||||
body.Res = &types.RemoveCustomFieldDefResponse{}
|
||||
|
@ -85,7 +167,7 @@ func (c *CustomFieldsManager) RemoveCustomFieldDef(req *types.RemoveCustomFieldD
|
|||
func (c *CustomFieldsManager) RenameCustomFieldDef(req *types.RenameCustomFieldDef) soap.HasFault {
|
||||
body := &methods.RenameCustomFieldDefBody{}
|
||||
|
||||
_, field := c.find(req.Key)
|
||||
_, field := c.findByKey(req.Key)
|
||||
if field == nil {
|
||||
body.Fault_ = Fault("", &types.NotFound{})
|
||||
return body
|
||||
|
@ -93,19 +175,30 @@ func (c *CustomFieldsManager) RenameCustomFieldDef(req *types.RenameCustomFieldD
|
|||
|
||||
field.Name = req.Name
|
||||
|
||||
entitiesFieldRename(*field)
|
||||
|
||||
body.Res = &types.RenameCustomFieldDefResponse{}
|
||||
return body
|
||||
}
|
||||
|
||||
func (c *CustomFieldsManager) SetField(req *types.SetField) soap.HasFault {
|
||||
func (c *CustomFieldsManager) SetField(ctx *Context, req *types.SetField) soap.HasFault {
|
||||
body := &methods.SetFieldBody{}
|
||||
|
||||
_, field := c.findByKey(req.Key)
|
||||
if field == nil {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "key"})
|
||||
return body
|
||||
}
|
||||
|
||||
newValue := &types.CustomFieldStringValue{
|
||||
CustomFieldValue: types.CustomFieldValue{Key: req.Key},
|
||||
Value: req.Value,
|
||||
}
|
||||
|
||||
entity := Map.Get(req.Entity).(mo.Entity).Entity()
|
||||
Map.WithLock(entity, func() {
|
||||
entity.CustomValue = append(entity.CustomValue, &types.CustomFieldStringValue{
|
||||
CustomFieldValue: types.CustomFieldValue{Key: req.Key},
|
||||
Value: req.Value,
|
||||
})
|
||||
ctx.WithLock(entity, func() {
|
||||
entity.CustomValue = append(entity.CustomValue, newValue)
|
||||
entity.Value = append(entity.Value, newValue)
|
||||
})
|
||||
|
||||
body.Res = &types.SetFieldResponse{}
|
||||
|
|
|
@ -31,6 +31,9 @@ func (s *DistributedVirtualSwitch) AddDVPortgroupTask(c *types.AddDVPortgroup_Ta
|
|||
task := CreateTask(s, "addDVPortgroup", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
f := Map.getEntityParent(s, "Folder").(*Folder)
|
||||
|
||||
portgroups := s.Portgroup
|
||||
portgroupNames := s.Summary.PortgroupName
|
||||
|
||||
for _, spec := range c.Spec {
|
||||
pg := &DistributedVirtualPortgroup{}
|
||||
pg.Name = spec.Name
|
||||
|
@ -71,17 +74,28 @@ func (s *DistributedVirtualSwitch) AddDVPortgroupTask(c *types.AddDVPortgroup_Ta
|
|||
|
||||
pg.PortKeys = []string{}
|
||||
|
||||
s.Portgroup = append(s.Portgroup, pg.Self)
|
||||
s.Summary.PortgroupName = append(s.Summary.PortgroupName, pg.Name)
|
||||
portgroups = append(portgroups, pg.Self)
|
||||
portgroupNames = append(portgroupNames, pg.Name)
|
||||
|
||||
for _, h := range s.Summary.HostMember {
|
||||
pg.Host = append(pg.Host, h)
|
||||
|
||||
host := Map.Get(h).(*HostSystem)
|
||||
Map.AppendReference(host, &host.Network, pg.Reference())
|
||||
|
||||
parent := Map.Get(*host.HostSystem.Parent)
|
||||
computeNetworks := append(hostParent(&host.HostSystem).Network, pg.Reference())
|
||||
Map.Update(parent, []types.PropertyChange{
|
||||
{Name: "network", Val: computeNetworks},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
Map.Update(s, []types.PropertyChange{
|
||||
{Name: "portgroup", Val: portgroups},
|
||||
{Name: "summary.portgroupName", Val: portgroupNames},
|
||||
})
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
|
@ -96,6 +110,8 @@ func (s *DistributedVirtualSwitch) ReconfigureDvsTask(req *types.ReconfigureDvs_
|
|||
task := CreateTask(s, "reconfigureDvs", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
spec := req.Spec.GetDVSConfigSpec()
|
||||
|
||||
members := s.Summary.HostMember
|
||||
|
||||
for _, member := range spec.Host {
|
||||
h := Map.Get(member.Host)
|
||||
if h == nil {
|
||||
|
@ -110,13 +126,27 @@ func (s *DistributedVirtualSwitch) ReconfigureDvsTask(req *types.ReconfigureDvs_
|
|||
return nil, &types.AlreadyExists{Name: host.Name}
|
||||
}
|
||||
|
||||
Map.AppendReference(host, &host.Network, s.Portgroup...)
|
||||
s.Summary.HostMember = append(s.Summary.HostMember, member.Host)
|
||||
hostNetworks := append(host.Network, s.Portgroup...)
|
||||
Map.Update(host, []types.PropertyChange{
|
||||
{Name: "network", Val: hostNetworks},
|
||||
})
|
||||
members = append(members, member.Host)
|
||||
parent := Map.Get(*host.HostSystem.Parent)
|
||||
|
||||
var pgs []types.ManagedObjectReference
|
||||
for _, ref := range s.Portgroup {
|
||||
pg := Map.Get(ref).(*DistributedVirtualPortgroup)
|
||||
Map.AddReference(pg, &pg.Host, member.Host)
|
||||
pgs = append(pgs, ref)
|
||||
|
||||
pgHosts := append(pg.Host, member.Host)
|
||||
Map.Update(pg, []types.PropertyChange{
|
||||
{Name: "host", Val: pgHosts},
|
||||
})
|
||||
}
|
||||
|
||||
Map.Update(parent, []types.PropertyChange{
|
||||
{Name: "network", Val: pgs},
|
||||
})
|
||||
case types.ConfigSpecOperationRemove:
|
||||
for _, ref := range host.Vm {
|
||||
vm := Map.Get(ref).(*VirtualMachine)
|
||||
|
@ -128,12 +158,16 @@ func (s *DistributedVirtualSwitch) ReconfigureDvsTask(req *types.ReconfigureDvs_
|
|||
}
|
||||
}
|
||||
|
||||
RemoveReference(&s.Summary.HostMember, member.Host)
|
||||
RemoveReference(&members, member.Host)
|
||||
case types.ConfigSpecOperationEdit:
|
||||
return nil, &types.NotSupported{}
|
||||
}
|
||||
}
|
||||
|
||||
Map.Update(s, []types.PropertyChange{
|
||||
{Name: "summary.hostMember", Val: members},
|
||||
})
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ func RenameTask(e mo.Entity, r *types.Rename_Task) soap.HasFault {
|
|||
}
|
||||
}
|
||||
|
||||
obj.Name = r.NewName
|
||||
Map.Update(e, []types.PropertyChange{{Name: "name", Val: r.NewName}})
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
|
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
Copyright (c) 2019 VMware, Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package simulator
|
||||
|
||||
import (
|
||||
"github.com/vmware/govmomi/simulator/esx"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
type EnvironmentBrowser struct {
|
||||
mo.EnvironmentBrowser
|
||||
}
|
||||
|
||||
func newEnvironmentBrowser() *types.ManagedObjectReference {
|
||||
env := new(EnvironmentBrowser)
|
||||
Map.Put(env)
|
||||
return &env.Self
|
||||
}
|
||||
|
||||
func (b *EnvironmentBrowser) QueryConfigOption(req *types.QueryConfigOption) soap.HasFault {
|
||||
body := new(methods.QueryConfigOptionBody)
|
||||
|
||||
opt := &types.VirtualMachineConfigOption{
|
||||
Version: esx.HardwareVersion,
|
||||
DefaultDevice: esx.VirtualDevice,
|
||||
}
|
||||
|
||||
body.Res = &types.QueryConfigOptionResponse{
|
||||
Returnval: opt,
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (b *EnvironmentBrowser) QueryConfigOptionEx(req *types.QueryConfigOptionEx) soap.HasFault {
|
||||
body := new(methods.QueryConfigOptionExBody)
|
||||
|
||||
opt := &types.VirtualMachineConfigOption{
|
||||
Version: esx.HardwareVersion,
|
||||
DefaultDevice: esx.VirtualDevice,
|
||||
}
|
||||
|
||||
body.Res = &types.QueryConfigOptionExResponse{
|
||||
Returnval: opt,
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
|
@ -13,10 +13,12 @@ go_library(
|
|||
"host_storage_device_info.go",
|
||||
"host_system.go",
|
||||
"performance_manager.go",
|
||||
"performance_manager_data.go",
|
||||
"resource_pool.go",
|
||||
"root_folder.go",
|
||||
"service_content.go",
|
||||
"setting.go",
|
||||
"task_manager.go",
|
||||
"virtual_device.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/github.com/vmware/govmomi/simulator/esx",
|
||||
|
|
|
@ -77,6 +77,12 @@ var EventInfo = []types.EventDescriptionEventDetail{
|
|||
Category: "info",
|
||||
FullFormat: "Host {{.Host.Name}} in {{.Datacenter.Name}} has exited maintenance mode",
|
||||
},
|
||||
{
|
||||
Key: "HostRemovedEvent",
|
||||
Description: "Host removed",
|
||||
FullFormat: "Removed host {{.Host.Name}} in {{.Datacenter.Name}}",
|
||||
Category: "info",
|
||||
},
|
||||
{
|
||||
Key: "VmSuspendedEvent",
|
||||
Description: "VM suspended",
|
||||
|
|
|
@ -355,7 +355,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -489,7 +489,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -524,7 +524,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -555,7 +555,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -586,7 +586,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -617,7 +617,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -648,7 +648,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -679,7 +679,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -710,7 +710,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
@ -741,7 +741,7 @@ var HostConfigInfo = types.HostConfigInfo{
|
|||
SubnetMask: "255.0.0.0",
|
||||
IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil),
|
||||
},
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
Mac: "00:0c:29:81:d8:a0",
|
||||
DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
|
||||
Portgroup: "Management Network",
|
||||
Mtu: 1500,
|
||||
|
|
|
@ -1730,7 +1730,7 @@ var HostSystem = mo.HostSystem{
|
|||
OverallMemoryUsage: 1404,
|
||||
DistributedCpuFairness: 0,
|
||||
DistributedMemoryFairness: 0,
|
||||
Uptime: 77229,
|
||||
Uptime: 77229,
|
||||
},
|
||||
OverallStatus: "gray",
|
||||
RebootRequired: false,
|
||||
|
|
File diff suppressed because it is too large
Load Diff
1213
vendor/github.com/vmware/govmomi/simulator/esx/performance_manager_data.go
generated
vendored
Normal file
1213
vendor/github.com/vmware/govmomi/simulator/esx/performance_manager_data.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
|
@ -60,7 +60,7 @@ var ResourcePool = mo.ResourcePool{
|
|||
DynamicData: types.DynamicData{},
|
||||
Reservation: types.NewInt64(4121),
|
||||
ExpandableReservation: types.NewBool(false),
|
||||
Limit: types.NewInt64(4121),
|
||||
Limit: types.NewInt64(4121),
|
||||
Shares: &types.SharesInfo{
|
||||
DynamicData: types.DynamicData{},
|
||||
Shares: 9000,
|
||||
|
@ -72,7 +72,7 @@ var ResourcePool = mo.ResourcePool{
|
|||
DynamicData: types.DynamicData{},
|
||||
Reservation: types.NewInt64(961),
|
||||
ExpandableReservation: types.NewBool(false),
|
||||
Limit: types.NewInt64(961),
|
||||
Limit: types.NewInt64(961),
|
||||
Shares: &types.SharesInfo{
|
||||
DynamicData: types.DynamicData{},
|
||||
Shares: 9000,
|
||||
|
@ -140,7 +140,7 @@ var ResourcePool = mo.ResourcePool{
|
|||
DynamicData: types.DynamicData{},
|
||||
Reservation: types.NewInt64(4121),
|
||||
ExpandableReservation: types.NewBool(false),
|
||||
Limit: types.NewInt64(4121),
|
||||
Limit: types.NewInt64(4121),
|
||||
Shares: &types.SharesInfo{
|
||||
DynamicData: types.DynamicData{},
|
||||
Shares: 9000,
|
||||
|
@ -152,7 +152,7 @@ var ResourcePool = mo.ResourcePool{
|
|||
DynamicData: types.DynamicData{},
|
||||
Reservation: types.NewInt64(961),
|
||||
ExpandableReservation: types.NewBool(false),
|
||||
Limit: types.NewInt64(961),
|
||||
Limit: types.NewInt64(961),
|
||||
Shares: &types.SharesInfo{
|
||||
DynamicData: types.DynamicData{},
|
||||
Shares: 9000,
|
||||
|
|
|
@ -18,6 +18,9 @@ package esx
|
|||
|
||||
import "github.com/vmware/govmomi/vim25/types"
|
||||
|
||||
// HardwareVersion is the default VirtualMachine.Config.Version
|
||||
var HardwareVersion = "vmx-13"
|
||||
|
||||
// Setting is captured from ESX's HostSystem.configManager.advancedOption
|
||||
// Capture method:
|
||||
// govc object.collect -s -dump $(govc object.collect -s HostSystem:ha-host configManager.advancedOption) setting
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -199,7 +199,7 @@ var VirtualDevice = []types.BaseVirtualDevice{
|
|||
ControllerKey: 100,
|
||||
UnitNumber: types.NewInt32(17),
|
||||
},
|
||||
Id: -1,
|
||||
Id: -1,
|
||||
AllowUnrestrictedCommunication: types.NewBool(false),
|
||||
FilterEnable: types.NewBool(true),
|
||||
FilterInfo: (*types.VirtualMachineVMCIDeviceFilterInfo)(nil),
|
||||
|
|
|
@ -138,11 +138,13 @@ func (m *EventManager) formatMessage(event types.BaseEvent) {
|
|||
}
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, event); err != nil {
|
||||
log.Print(err)
|
||||
if t != nil {
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, event); err != nil {
|
||||
log.Print(err)
|
||||
}
|
||||
e.FullFormattedMessage = buf.String()
|
||||
}
|
||||
e.FullFormattedMessage = buf.String()
|
||||
|
||||
if logEvents {
|
||||
log.Printf("[%s] %s", id, e.FullFormattedMessage)
|
||||
|
@ -181,6 +183,7 @@ type EventHistoryCollector struct {
|
|||
|
||||
m *EventManager
|
||||
page *ring.Ring
|
||||
pos int
|
||||
}
|
||||
|
||||
// doEntityEventArgument calls f for each entity argument in the event.
|
||||
|
@ -328,6 +331,7 @@ func (c *EventHistoryCollector) eventMatches(event types.BaseEvent) bool {
|
|||
|
||||
// filePage copies the manager's latest events into the collector's page with Filter applied.
|
||||
func (c *EventHistoryCollector) fillPage(size int) {
|
||||
c.pos = 0
|
||||
l := c.page.Len()
|
||||
delta := size - l
|
||||
|
||||
|
@ -392,6 +396,66 @@ func (c *EventHistoryCollector) SetCollectorPageSize(ctx *Context, req *types.Se
|
|||
return body
|
||||
}
|
||||
|
||||
func (c *EventHistoryCollector) RewindCollector(ctx *Context, req *types.RewindCollector) soap.HasFault {
|
||||
c.pos = 0
|
||||
return &methods.RewindCollectorBody{
|
||||
Res: new(types.RewindCollectorResponse),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *EventHistoryCollector) ReadNextEvents(ctx *Context, req *types.ReadNextEvents) soap.HasFault {
|
||||
body := &methods.ReadNextEventsBody{}
|
||||
if req.MaxCount <= 0 {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "maxCount"})
|
||||
return body
|
||||
}
|
||||
body.Res = new(types.ReadNextEventsResponse)
|
||||
|
||||
events := c.GetLatestPage()
|
||||
nevents := len(events)
|
||||
if c.pos == nevents {
|
||||
return body // already read to EOF
|
||||
}
|
||||
|
||||
start := c.pos
|
||||
end := start + int(req.MaxCount)
|
||||
c.pos += int(req.MaxCount)
|
||||
if end > nevents {
|
||||
end = nevents
|
||||
c.pos = nevents
|
||||
}
|
||||
|
||||
body.Res.Returnval = events[start:end]
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (c *EventHistoryCollector) ReadPreviousEvents(ctx *Context, req *types.ReadPreviousEvents) soap.HasFault {
|
||||
body := &methods.ReadPreviousEventsBody{}
|
||||
if req.MaxCount <= 0 {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "maxCount"})
|
||||
return body
|
||||
}
|
||||
body.Res = new(types.ReadPreviousEventsResponse)
|
||||
|
||||
events := c.GetLatestPage()
|
||||
if c.pos == 0 {
|
||||
return body // already read to EOF
|
||||
}
|
||||
|
||||
start := c.pos - int(req.MaxCount)
|
||||
end := c.pos
|
||||
c.pos -= int(req.MaxCount)
|
||||
if start < 0 {
|
||||
start = 0
|
||||
c.pos = 0
|
||||
}
|
||||
|
||||
body.Res.Returnval = events[start:end]
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (c *EventHistoryCollector) DestroyCollector(ctx *Context, req *types.DestroyCollector) soap.HasFault {
|
||||
ctx.Session.Remove(req.This)
|
||||
|
||||
|
|
|
@ -178,7 +178,7 @@ func (f *FileManager) moveDatastoreFile(req *types.MoveDatastoreFile_Task) types
|
|||
if !isTrue(req.Force) {
|
||||
_, err := os.Stat(dst)
|
||||
if err == nil {
|
||||
return f.fault(dst, nil, new(types.FileAlreadyExistsFault))
|
||||
return f.fault(dst, nil, new(types.FileAlreadyExists))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -216,7 +216,7 @@ func (f *FileManager) copyDatastoreFile(req *types.CopyDatastoreFile_Task) types
|
|||
if !isTrue(req.Force) {
|
||||
_, err := os.Stat(dst)
|
||||
if err == nil {
|
||||
return f.fault(dst, nil, new(types.FileAlreadyExistsFault))
|
||||
return f.fault(dst, nil, new(types.FileAlreadyExists))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
|
@ -177,6 +177,9 @@ func (f *Folder) CreateStoragePod(c *types.CreateStoragePod) soap.HasFault {
|
|||
|
||||
pod.Name = c.Name
|
||||
pod.ChildType = []string{"Datastore"}
|
||||
pod.Summary = new(types.StoragePodSummary)
|
||||
pod.PodStorageDrsEntry = new(types.PodStorageDrsEntry)
|
||||
pod.PodStorageDrsEntry.StorageDrsConfig.PodConfig.Enabled = true
|
||||
|
||||
f.putChild(pod)
|
||||
|
||||
|
@ -191,7 +194,10 @@ func (f *Folder) CreateStoragePod(c *types.CreateStoragePod) soap.HasFault {
|
|||
}
|
||||
|
||||
func (p *StoragePod) MoveIntoFolderTask(c *types.MoveIntoFolder_Task) soap.HasFault {
|
||||
return (&Folder{Folder: p.Folder}).MoveIntoFolderTask(c)
|
||||
f := &Folder{Folder: p.Folder}
|
||||
res := f.MoveIntoFolderTask(c)
|
||||
p.ChildEntity = append(p.ChildEntity, f.ChildEntity...)
|
||||
return res
|
||||
}
|
||||
|
||||
func (f *Folder) CreateDatacenter(ctx *Context, c *types.CreateDatacenter) soap.HasFault {
|
||||
|
@ -250,9 +256,26 @@ type createVM struct {
|
|||
register bool
|
||||
}
|
||||
|
||||
// hostsWithDatastore returns hosts that have access to the given datastore path
|
||||
func hostsWithDatastore(hosts []types.ManagedObjectReference, path string) []types.ManagedObjectReference {
|
||||
attached := hosts[:0]
|
||||
var p object.DatastorePath
|
||||
p.FromString(path)
|
||||
|
||||
for _, host := range hosts {
|
||||
h := Map.Get(host).(*HostSystem)
|
||||
if Map.FindByName(p.Datastore, h.Datastore) != nil {
|
||||
attached = append(attached, host)
|
||||
}
|
||||
}
|
||||
|
||||
return attached
|
||||
}
|
||||
|
||||
func (c *createVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
vm, err := NewVirtualMachine(c.Folder.Self, &c.req.Config)
|
||||
if err != nil {
|
||||
c.Folder.removeChild(vm)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -270,7 +293,7 @@ func (c *createVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
|
|||
hosts = cr.Host
|
||||
}
|
||||
|
||||
// Assuming for now that all hosts have access to the datastore
|
||||
hosts = hostsWithDatastore(hosts, c.req.Config.Files.VmPathName)
|
||||
host := hosts[rand.Intn(len(hosts))]
|
||||
vm.Runtime.Host = &host
|
||||
} else {
|
||||
|
@ -290,13 +313,13 @@ func (c *createVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
|
|||
|
||||
err = vm.create(&c.req.Config, c.register)
|
||||
if err != nil {
|
||||
c.Folder.removeChild(vm)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.Folder.putChild(vm)
|
||||
|
||||
host := Map.Get(*vm.Runtime.Host).(*HostSystem)
|
||||
Map.AppendReference(host, &host.Vm, vm.Self)
|
||||
vm.EnvironmentBrowser = *hostParent(&host.HostSystem).EnvironmentBrowser
|
||||
|
||||
for i := range vm.Datastore {
|
||||
ds := Map.Get(vm.Datastore[i]).(*Datastore)
|
||||
|
@ -333,6 +356,8 @@ func (c *createVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
|
|||
},
|
||||
)
|
||||
|
||||
vm.RefreshStorageInfo(c.ctx, nil)
|
||||
|
||||
return vm.Reference(), nil
|
||||
}
|
||||
|
||||
|
|
|
@ -187,7 +187,7 @@ func (s *searchDatastore) search(ds *types.ManagedObjectReference, folder string
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *searchDatastore) Run(Task *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
func (s *searchDatastore) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
p, fault := parseDatastorePath(s.DatastorePath)
|
||||
if fault != nil {
|
||||
return nil, fault
|
||||
|
@ -199,6 +199,8 @@ func (s *searchDatastore) Run(Task *Task) (types.AnyType, types.BaseMethodFault)
|
|||
}
|
||||
|
||||
ds := ref.(*Datastore)
|
||||
task.Info.Entity = &ds.Self // TODO: CreateTask() should require mo.Entity, rather than mo.Reference
|
||||
task.Info.EntityName = ds.Name
|
||||
|
||||
dir := path.Join(ds.Info.GetDatastoreInfo().Url, p.Path)
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package simulator
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
@ -27,11 +28,21 @@ import (
|
|||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
var (
|
||||
hostPortUnique = os.Getenv("VCSIM_HOST_PORT_UNIQUE") == "true"
|
||||
)
|
||||
|
||||
type HostSystem struct {
|
||||
mo.HostSystem
|
||||
}
|
||||
|
||||
func NewHostSystem(host mo.HostSystem) *HostSystem {
|
||||
if hostPortUnique { // configure unique port for each host
|
||||
port := &esx.HostSystem.Summary.Config.Port
|
||||
*port++
|
||||
host.Summary.Config.Port = *port
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
hs := &HostSystem{
|
||||
|
@ -71,6 +82,16 @@ func NewHostSystem(host mo.HostSystem) *HostSystem {
|
|||
return hs
|
||||
}
|
||||
|
||||
func (h *HostSystem) event() types.HostEvent {
|
||||
return types.HostEvent{
|
||||
Event: types.Event{
|
||||
Datacenter: datacenterEventArgument(h),
|
||||
ComputeResource: h.eventArgumentParent(),
|
||||
Host: h.eventArgument(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (h *HostSystem) eventArgument() *types.HostEventArgument {
|
||||
return &types.HostEventArgument{
|
||||
Host: h.Self,
|
||||
|
@ -121,6 +142,7 @@ func CreateDefaultESX(f *Folder) {
|
|||
addComputeResource(summary, host)
|
||||
|
||||
cr := &mo.ComputeResource{Summary: summary}
|
||||
cr.EnvironmentBrowser = newEnvironmentBrowser()
|
||||
cr.Self = *host.Parent
|
||||
cr.Name = host.Name
|
||||
cr.Host = append(cr.Host, host.Reference())
|
||||
|
@ -155,7 +177,8 @@ func CreateStandaloneHost(f *Folder, spec types.HostConnectSpec) (*HostSystem, t
|
|||
ConfigurationEx: &types.ComputeResourceConfigInfo{
|
||||
VmSwapPlacement: string(types.VirtualMachineConfigInfoSwapPlacementTypeVmDirectory),
|
||||
},
|
||||
Summary: summary,
|
||||
Summary: summary,
|
||||
EnvironmentBrowser: newEnvironmentBrowser(),
|
||||
}
|
||||
|
||||
Map.PutEntity(cr, Map.NewEntity(host))
|
||||
|
@ -173,12 +196,14 @@ func CreateStandaloneHost(f *Folder, spec types.HostConnectSpec) (*HostSystem, t
|
|||
return host, nil
|
||||
}
|
||||
|
||||
func (h *HostSystem) DestroyTask(req *types.Destroy_Task) soap.HasFault {
|
||||
func (h *HostSystem) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.HasFault {
|
||||
task := CreateTask(h, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
if len(h.Vm) > 0 {
|
||||
return nil, &types.ResourceInUse{}
|
||||
}
|
||||
|
||||
ctx.postEvent(&types.HostRemovedEvent{HostEvent: h.event()})
|
||||
|
||||
f := Map.getEntityParent(h, "Folder").(*Folder)
|
||||
f.removeChild(h.Reference())
|
||||
|
||||
|
|
|
@ -30,8 +30,8 @@ import (
|
|||
)
|
||||
|
||||
var ipPool = MustNewIpPool(&types.IpPool{
|
||||
Id: 1,
|
||||
Name: "ip-pool",
|
||||
Id: 1,
|
||||
Name: "ip-pool",
|
||||
AvailableIpv4Addresses: 250,
|
||||
AvailableIpv6Addresses: 250,
|
||||
AllocatedIpv6Addresses: 0,
|
||||
|
|
|
@ -113,6 +113,43 @@ func (m *LicenseManager) RemoveLicense(req *types.RemoveLicense) soap.HasFault {
|
|||
return body
|
||||
}
|
||||
|
||||
func (m *LicenseManager) UpdateLicenseLabel(req *types.UpdateLicenseLabel) soap.HasFault {
|
||||
body := &methods.UpdateLicenseLabelBody{}
|
||||
|
||||
for i := range m.Licenses {
|
||||
license := &m.Licenses[i]
|
||||
|
||||
if req.LicenseKey != license.LicenseKey {
|
||||
continue
|
||||
}
|
||||
|
||||
body.Res = new(types.UpdateLicenseLabelResponse)
|
||||
|
||||
for j := range license.Labels {
|
||||
label := &license.Labels[j]
|
||||
|
||||
if label.Key == req.LabelKey {
|
||||
if req.LabelValue == "" {
|
||||
license.Labels = append(license.Labels[:i], license.Labels[i+1:]...)
|
||||
} else {
|
||||
label.Value = req.LabelValue
|
||||
}
|
||||
return body
|
||||
}
|
||||
}
|
||||
|
||||
license.Labels = append(license.Labels, types.KeyValue{
|
||||
Key: req.LabelKey,
|
||||
Value: req.LabelValue,
|
||||
})
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "licenseKey"})
|
||||
return body
|
||||
}
|
||||
|
||||
type LicenseAssignmentManager struct {
|
||||
mo.LicenseAssignmentManager
|
||||
}
|
||||
|
|
|
@ -30,6 +30,21 @@ import (
|
|||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
type DelayConfig struct {
|
||||
// Delay specifies the number of milliseconds to delay serving a SOAP call. 0 means no delay.
|
||||
// This can be used to simulate a poorly performing vCenter or network lag.
|
||||
Delay int
|
||||
|
||||
// Delay specifies the number of milliseconds to delay serving a specific method.
|
||||
// Each entry in the map represents the name of a method and its associated delay in milliseconds,
|
||||
// This can be used to simulate a poorly performing vCenter or network lag.
|
||||
MethodDelay map[string]int
|
||||
|
||||
// DelayJitter defines the delay jitter as a coefficient of variation (stddev/mean).
|
||||
// This can be used to simulate unpredictable delay. 0 means no jitter, i.e. all invocations get the same delay.
|
||||
DelayJitter float64
|
||||
}
|
||||
|
||||
// Model is used to populate a Model with an initial set of managed entities.
|
||||
// This is a simple helper for tests running against a simulator, to populate an inventory
|
||||
// with commonly used models.
|
||||
|
@ -79,6 +94,9 @@ type Model struct {
|
|||
// Pod specifies the number of StoragePod to create per Cluster
|
||||
Pod int
|
||||
|
||||
// Delay configurations
|
||||
DelayConfig DelayConfig
|
||||
|
||||
// total number of inventory objects, set by Count()
|
||||
total int
|
||||
|
||||
|
@ -93,6 +111,11 @@ func ESX() *Model {
|
|||
Autostart: true,
|
||||
Datastore: 1,
|
||||
Machine: 2,
|
||||
DelayConfig: DelayConfig{
|
||||
Delay: 0,
|
||||
DelayJitter: 0,
|
||||
MethodDelay: nil,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -109,6 +132,11 @@ func VPX() *Model {
|
|||
ClusterHost: 3,
|
||||
Datastore: 1,
|
||||
Machine: 2,
|
||||
DelayConfig: DelayConfig{
|
||||
Delay: 0,
|
||||
DelayJitter: 0,
|
||||
MethodDelay: nil,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -454,6 +482,9 @@ func (m *Model) Create() error {
|
|||
}
|
||||
}
|
||||
|
||||
// Turn on delay AFTER we're done building the service content
|
||||
m.Service.delay = &m.DelayConfig
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package simulator
|
||||
|
||||
import (
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
func SetCustomValue(ctx *Context, req *types.SetCustomValue) soap.HasFault {
|
||||
ctx.Caller = &req.This
|
||||
body := &methods.SetCustomValueBody{}
|
||||
|
||||
cfm := Map.CustomFieldsManager()
|
||||
|
||||
_, field := cfm.findByNameType(req.Key, req.This.Type)
|
||||
if field == nil {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "key"})
|
||||
return body
|
||||
}
|
||||
|
||||
res := cfm.SetField(ctx, &types.SetField{
|
||||
This: cfm.Reference(),
|
||||
Entity: req.This,
|
||||
Key: field.Key,
|
||||
Value: req.Value,
|
||||
})
|
||||
|
||||
if res.Fault() != nil {
|
||||
body.Fault_ = res.Fault()
|
||||
return body
|
||||
}
|
||||
|
||||
body.Res = &types.SetCustomValueResponse{}
|
||||
return body
|
||||
}
|
|
@ -17,19 +17,243 @@ limitations under the License.
|
|||
package simulator
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/simulator/esx"
|
||||
"github.com/vmware/govmomi/simulator/vpx"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
var realtimeProviderSummary = types.PerfProviderSummary{
|
||||
CurrentSupported: true,
|
||||
SummarySupported: true,
|
||||
RefreshRate: 20,
|
||||
}
|
||||
|
||||
var historicProviderSummary = types.PerfProviderSummary{
|
||||
CurrentSupported: false,
|
||||
SummarySupported: true,
|
||||
RefreshRate: -1,
|
||||
}
|
||||
|
||||
type PerformanceManager struct {
|
||||
mo.PerformanceManager
|
||||
vmMetrics []types.PerfMetricId
|
||||
hostMetrics []types.PerfMetricId
|
||||
rpMetrics []types.PerfMetricId
|
||||
clusterMetrics []types.PerfMetricId
|
||||
datastoreMetrics []types.PerfMetricId
|
||||
datacenterMetrics []types.PerfMetricId
|
||||
perfCounterIndex map[int32]types.PerfCounterInfo
|
||||
metricData map[string]map[int32][]int64
|
||||
}
|
||||
|
||||
func NewPerformanceManager(ref types.ManagedObjectReference) object.Reference {
|
||||
m := &PerformanceManager{}
|
||||
m.Self = ref
|
||||
m.PerfCounter = esx.PerfCounter
|
||||
if Map.IsESX() {
|
||||
m.PerfCounter = esx.PerfCounter[:]
|
||||
m.hostMetrics = esx.HostMetrics[:]
|
||||
m.vmMetrics = esx.VmMetrics[:]
|
||||
m.rpMetrics = esx.ResourcePoolMetrics[:]
|
||||
m.metricData = esx.MetricData
|
||||
} else {
|
||||
m.PerfCounter = vpx.PerfCounter[:]
|
||||
m.hostMetrics = vpx.HostMetrics[:]
|
||||
m.vmMetrics = vpx.VmMetrics[:]
|
||||
m.rpMetrics = vpx.ResourcePoolMetrics[:]
|
||||
m.clusterMetrics = vpx.ClusterMetrics[:]
|
||||
m.datastoreMetrics = vpx.DatastoreMetrics[:]
|
||||
m.datacenterMetrics = vpx.DatacenterMetrics[:]
|
||||
m.metricData = vpx.MetricData
|
||||
}
|
||||
m.perfCounterIndex = make(map[int32]types.PerfCounterInfo, len(m.PerfCounter))
|
||||
for _, p := range m.PerfCounter {
|
||||
m.perfCounterIndex[p.Key] = p
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (p *PerformanceManager) QueryPerfCounter(ctx *Context, req *types.QueryPerfCounter) soap.HasFault {
|
||||
body := new(methods.QueryPerfCounterBody)
|
||||
body.Req = req
|
||||
body.Res.Returnval = make([]types.PerfCounterInfo, len(req.CounterId))
|
||||
for i, id := range req.CounterId {
|
||||
if info, ok := p.perfCounterIndex[id]; !ok {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{
|
||||
InvalidProperty: "CounterId",
|
||||
})
|
||||
return body
|
||||
} else {
|
||||
body.Res.Returnval[i] = info
|
||||
}
|
||||
}
|
||||
return body
|
||||
}
|
||||
|
||||
func (p *PerformanceManager) QueryPerfProviderSummary(ctx *Context, req *types.QueryPerfProviderSummary) soap.HasFault {
|
||||
body := new(methods.QueryPerfProviderSummaryBody)
|
||||
body.Req = req
|
||||
body.Res = new(types.QueryPerfProviderSummaryResponse)
|
||||
|
||||
// The entity must exist
|
||||
if Map.Get(req.Entity) == nil {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{
|
||||
InvalidProperty: "Entity",
|
||||
})
|
||||
return body
|
||||
}
|
||||
|
||||
switch req.Entity.Type {
|
||||
case "VirtualMachine", "HostSystem", "ResourcePool":
|
||||
body.Res.Returnval = realtimeProviderSummary
|
||||
default:
|
||||
body.Res.Returnval = historicProviderSummary
|
||||
}
|
||||
body.Res.Returnval.Entity = req.Entity
|
||||
return body
|
||||
}
|
||||
|
||||
func (p *PerformanceManager) buildAvailablePerfMetricsQueryResponse(ids []types.PerfMetricId, numCPU int, datastoreURL string) *types.QueryAvailablePerfMetricResponse {
|
||||
r := new(types.QueryAvailablePerfMetricResponse)
|
||||
r.Returnval = make([]types.PerfMetricId, 0, len(ids))
|
||||
for _, id := range ids {
|
||||
switch id.Instance {
|
||||
case "$cpu":
|
||||
for i := 0; i < numCPU; i++ {
|
||||
r.Returnval = append(r.Returnval, types.PerfMetricId{CounterId: id.CounterId, Instance: strconv.Itoa(i)})
|
||||
}
|
||||
case "$physDisk":
|
||||
r.Returnval = append(r.Returnval, types.PerfMetricId{CounterId: id.CounterId, Instance: datastoreURL})
|
||||
case "$file":
|
||||
r.Returnval = append(r.Returnval, types.PerfMetricId{CounterId: id.CounterId, Instance: "DISKFILE"})
|
||||
r.Returnval = append(r.Returnval, types.PerfMetricId{CounterId: id.CounterId, Instance: "DELTAFILE"})
|
||||
r.Returnval = append(r.Returnval, types.PerfMetricId{CounterId: id.CounterId, Instance: "SWAPFILE"})
|
||||
r.Returnval = append(r.Returnval, types.PerfMetricId{CounterId: id.CounterId, Instance: "OTHERFILE"})
|
||||
default:
|
||||
r.Returnval = append(r.Returnval, types.PerfMetricId{CounterId: id.CounterId, Instance: id.Instance})
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (p *PerformanceManager) queryAvailablePerfMetric(entity types.ManagedObjectReference, interval int32) *types.QueryAvailablePerfMetricResponse {
|
||||
switch entity.Type {
|
||||
case "VirtualMachine":
|
||||
vm := Map.Get(entity).(*VirtualMachine)
|
||||
return p.buildAvailablePerfMetricsQueryResponse(p.vmMetrics, int(vm.Summary.Config.NumCpu), vm.Datastore[0].Value)
|
||||
case "HostSystem":
|
||||
host := Map.Get(entity).(*HostSystem)
|
||||
return p.buildAvailablePerfMetricsQueryResponse(p.hostMetrics, int(host.Hardware.CpuInfo.NumCpuThreads), host.Datastore[0].Value)
|
||||
case "ResourcePool":
|
||||
return p.buildAvailablePerfMetricsQueryResponse(p.rpMetrics, 0, "")
|
||||
case "ClusterComputeResource":
|
||||
if interval != 20 {
|
||||
return p.buildAvailablePerfMetricsQueryResponse(p.clusterMetrics, 0, "")
|
||||
}
|
||||
case "Datastore":
|
||||
if interval != 20 {
|
||||
return p.buildAvailablePerfMetricsQueryResponse(p.datastoreMetrics, 0, "")
|
||||
}
|
||||
case "Datacenter":
|
||||
if interval != 20 {
|
||||
return p.buildAvailablePerfMetricsQueryResponse(p.datacenterMetrics, 0, "")
|
||||
}
|
||||
}
|
||||
|
||||
// Don't know how to handle this. Return empty response.
|
||||
return new(types.QueryAvailablePerfMetricResponse)
|
||||
}
|
||||
|
||||
func (p *PerformanceManager) QueryAvailablePerfMetric(ctx *Context, req *types.QueryAvailablePerfMetric) soap.HasFault {
|
||||
body := new(methods.QueryAvailablePerfMetricBody)
|
||||
body.Req = req
|
||||
body.Res = p.queryAvailablePerfMetric(req.Entity, req.IntervalId)
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (p *PerformanceManager) QueryPerf(ctx *Context, req *types.QueryPerf) soap.HasFault {
|
||||
body := new(methods.QueryPerfBody)
|
||||
body.Req = req
|
||||
body.Res = new(types.QueryPerfResponse)
|
||||
body.Res.Returnval = make([]types.BasePerfEntityMetricBase, len(req.QuerySpec))
|
||||
|
||||
for i, qs := range req.QuerySpec {
|
||||
metrics := new(types.PerfEntityMetric)
|
||||
metrics.Entity = qs.Entity
|
||||
|
||||
// Get metric data for this entity type
|
||||
metricData, ok := p.metricData[qs.Entity.Type]
|
||||
if !ok {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{
|
||||
InvalidProperty: "Entity",
|
||||
})
|
||||
}
|
||||
var start, end time.Time
|
||||
if qs.StartTime == nil {
|
||||
start = time.Now().Add(time.Duration(-365*24) * time.Hour) // Assume we have data for a year
|
||||
} else {
|
||||
start = *qs.StartTime
|
||||
}
|
||||
if qs.EndTime == nil {
|
||||
end = time.Now()
|
||||
} else {
|
||||
end = *qs.EndTime
|
||||
}
|
||||
|
||||
// Generate metric series. Divide into n buckets of interval seconds
|
||||
interval := qs.IntervalId
|
||||
if interval == -1 || interval == 0 {
|
||||
interval = 20 // TODO: Determine from entity type
|
||||
}
|
||||
n := 1 + int32(end.Sub(start).Seconds())/interval
|
||||
if n > qs.MaxSample {
|
||||
n = qs.MaxSample
|
||||
}
|
||||
|
||||
// Loop through each interval "tick"
|
||||
metrics.SampleInfo = make([]types.PerfSampleInfo, n)
|
||||
metrics.Value = make([]types.BasePerfMetricSeries, len(qs.MetricId))
|
||||
for tick := int32(0); tick < n; tick++ {
|
||||
metrics.SampleInfo[tick] = types.PerfSampleInfo{Timestamp: end.Add(time.Duration(-interval*tick) * time.Second), Interval: interval}
|
||||
}
|
||||
|
||||
for j, mid := range qs.MetricId {
|
||||
// Create list of metrics for this tick
|
||||
series := &types.PerfMetricIntSeries{Value: make([]int64, n)}
|
||||
series.Id = mid
|
||||
points := metricData[mid.CounterId]
|
||||
offset := int64(start.Unix()) / int64(interval)
|
||||
|
||||
for tick := int32(0); tick < n; tick++ {
|
||||
var p int64
|
||||
|
||||
// Use sample data if we have it. Otherwise, just send 0.
|
||||
if len(points) > 0 {
|
||||
p = points[(offset+int64(tick))%int64(len(points))]
|
||||
scale := p / 5
|
||||
if scale > 0 {
|
||||
// Add some gaussian noise to make the data look more "real"
|
||||
p += int64(rand.NormFloat64() * float64(scale))
|
||||
if p < 0 {
|
||||
p = 0
|
||||
}
|
||||
}
|
||||
} else {
|
||||
p = 0
|
||||
}
|
||||
series.Value[tick] = p
|
||||
}
|
||||
metrics.Value[j] = series
|
||||
}
|
||||
body.Res.Returnval[i] = metrics
|
||||
}
|
||||
return body
|
||||
}
|
||||
|
|
|
@ -337,6 +337,7 @@ func (rr *retrieveResult) collect(ctx *Context, ref types.ManagedObjectReference
|
|||
}
|
||||
|
||||
rtype := rval.Type()
|
||||
match := false
|
||||
|
||||
for _, spec := range rr.req.SpecSet {
|
||||
for _, p := range spec.PropSet {
|
||||
|
@ -348,7 +349,7 @@ func (rr *retrieveResult) collect(ctx *Context, ref types.ManagedObjectReference
|
|||
continue
|
||||
}
|
||||
}
|
||||
|
||||
match = true
|
||||
if isTrue(p.All) {
|
||||
rr.collectAll(ctx, rval, rtype, &content)
|
||||
continue
|
||||
|
@ -358,7 +359,7 @@ func (rr *retrieveResult) collect(ctx *Context, ref types.ManagedObjectReference
|
|||
}
|
||||
}
|
||||
|
||||
if len(content.PropSet) != 0 || len(content.MissingSet) != 0 {
|
||||
if match {
|
||||
rr.Objects = append(rr.Objects, content)
|
||||
}
|
||||
|
||||
|
@ -498,7 +499,12 @@ func (pc *PropertyCollector) RetrievePropertiesEx(ctx *Context, r *types.Retriev
|
|||
res, fault := pc.collect(ctx, r)
|
||||
|
||||
if fault != nil {
|
||||
body.Fault_ = Fault("", fault)
|
||||
switch fault.(type) {
|
||||
case *types.ManagedObjectNotFound:
|
||||
body.Fault_ = Fault("The object has already been deleted or has not been completely created", fault)
|
||||
default:
|
||||
body.Fault_ = Fault("", fault)
|
||||
}
|
||||
} else {
|
||||
objects := res.Objects[:0]
|
||||
for _, o := range res.Objects {
|
||||
|
@ -627,9 +633,14 @@ func (pc *PropertyCollector) apply(ctx *Context, update *types.UpdateSet) types.
|
|||
|
||||
func (pc *PropertyCollector) WaitForUpdatesEx(ctx *Context, r *types.WaitForUpdatesEx) soap.HasFault {
|
||||
wait, cancel := context.WithCancel(context.Background())
|
||||
oneUpdate := false
|
||||
if r.Options != nil {
|
||||
if max := r.Options.MaxWaitSeconds; max != nil {
|
||||
wait, cancel = context.WithTimeout(context.Background(), time.Second*time.Duration(*max))
|
||||
// A value of 0 causes WaitForUpdatesEx to do one update calculation and return any results.
|
||||
oneUpdate = (*max == 0)
|
||||
if *max > 0 {
|
||||
wait, cancel = context.WithTimeout(context.Background(), time.Second*time.Duration(*max))
|
||||
}
|
||||
}
|
||||
}
|
||||
pc.mu.Lock()
|
||||
|
@ -688,6 +699,10 @@ func (pc *PropertyCollector) WaitForUpdatesEx(ctx *Context, r *types.WaitForUpda
|
|||
pc.updates = nil // clear updates collected by the managed object CRUD listeners
|
||||
pc.mu.Unlock()
|
||||
if len(updates) == 0 {
|
||||
if oneUpdate == true {
|
||||
body.Res.Returnval = nil
|
||||
return body
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -732,6 +747,10 @@ func (pc *PropertyCollector) WaitForUpdatesEx(ctx *Context, r *types.WaitForUpda
|
|||
if len(set.FilterSet) != 0 {
|
||||
return body
|
||||
}
|
||||
if oneUpdate == true {
|
||||
body.Res.Returnval = nil
|
||||
return body
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,9 +47,18 @@ func (f *PropertyFilter) DestroyPropertyFilter(ctx *Context, c *types.DestroyPro
|
|||
|
||||
// matches returns true if the change matches one of the filter Spec.PropSet
|
||||
func (f *PropertyFilter) matches(ctx *Context, ref types.ManagedObjectReference, change *types.PropertyChange) bool {
|
||||
var kind reflect.Type
|
||||
|
||||
for _, p := range f.Spec.PropSet {
|
||||
if p.Type != ref.Type {
|
||||
continue
|
||||
if kind == nil {
|
||||
kind = getManagedObject(ctx.Map.Get(ref)).Type()
|
||||
}
|
||||
// e.g. ManagedEntity, ComputeResource
|
||||
field, ok := kind.FieldByName(p.Type)
|
||||
if !(ok && field.Anonymous) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if isTrue(p.All) {
|
||||
|
|
|
@ -57,14 +57,24 @@ type RegisterObject interface {
|
|||
|
||||
// Registry manages a map of mo.Reference objects
|
||||
type Registry struct {
|
||||
counter int64 // Keep first to ensure 64-bit alignment
|
||||
m sync.Mutex
|
||||
objects map[types.ManagedObjectReference]mo.Reference
|
||||
handlers map[types.ManagedObjectReference]RegisterObject
|
||||
locks map[types.ManagedObjectReference]sync.Locker
|
||||
counter int64
|
||||
|
||||
Namespace string
|
||||
Path string
|
||||
|
||||
tagManager tagManager
|
||||
}
|
||||
|
||||
// tagManager is an interface to simplify internal interaction with the vapi tag manager simulator.
|
||||
type tagManager interface {
|
||||
AttachedObjects(types.VslmTagEntry) ([]types.ManagedObjectReference, types.BaseMethodFault)
|
||||
AttachedTags(id types.ManagedObjectReference) ([]types.VslmTagEntry, types.BaseMethodFault)
|
||||
AttachTag(types.ManagedObjectReference, types.VslmTagEntry) types.BaseMethodFault
|
||||
DetachTag(types.ManagedObjectReference, types.VslmTagEntry) types.BaseMethodFault
|
||||
}
|
||||
|
||||
// NewRegistry creates a new instances of Registry
|
||||
|
@ -177,6 +187,24 @@ func (r *Registry) Any(kind string) mo.Entity {
|
|||
return nil
|
||||
}
|
||||
|
||||
// All returns all entities of type specified by kind.
|
||||
// If kind is empty - all entities will be returned.
|
||||
func (r *Registry) All(kind string) []mo.Entity {
|
||||
r.m.Lock()
|
||||
defer r.m.Unlock()
|
||||
|
||||
var entities []mo.Entity
|
||||
for ref, val := range r.objects {
|
||||
if kind == "" || ref.Type == kind {
|
||||
if e, ok := val.(mo.Entity); ok {
|
||||
entities = append(entities, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return entities
|
||||
}
|
||||
|
||||
// applyHandlers calls the given func for each r.handlers
|
||||
func (r *Registry) applyHandlers(f func(o RegisterObject)) {
|
||||
r.m.Lock()
|
||||
|
@ -443,6 +471,11 @@ func (r *Registry) OptionManager() *OptionManager {
|
|||
return r.Get(r.content().Setting.Reference()).(*OptionManager)
|
||||
}
|
||||
|
||||
// CustomFieldsManager returns CustomFieldsManager singleton
|
||||
func (r *Registry) CustomFieldsManager() *CustomFieldsManager {
|
||||
return r.Get(r.content().CustomFieldsManager.Reference()).(*CustomFieldsManager)
|
||||
}
|
||||
|
||||
func (r *Registry) MarshalJSON() ([]byte, error) {
|
||||
r.m.Lock()
|
||||
defer r.m.Unlock()
|
||||
|
|
|
@ -163,3 +163,101 @@ func (s *SearchIndex) FindByUuid(req *types.FindByUuid) soap.HasFault {
|
|||
|
||||
return body
|
||||
}
|
||||
|
||||
func (s *SearchIndex) FindByDnsName(req *types.FindByDnsName) soap.HasFault {
|
||||
body := &methods.FindByDnsNameBody{Res: new(types.FindByDnsNameResponse)}
|
||||
|
||||
all := types.FindAllByDnsName(*req)
|
||||
|
||||
switch r := s.FindAllByDnsName(&all).(type) {
|
||||
case *methods.FindAllByDnsNameBody:
|
||||
if len(r.Res.Returnval) > 0 {
|
||||
body.Res.Returnval = &r.Res.Returnval[0]
|
||||
}
|
||||
default:
|
||||
// no need until FindAllByDnsName below returns a Fault
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (s *SearchIndex) FindAllByDnsName(req *types.FindAllByDnsName) soap.HasFault {
|
||||
body := &methods.FindAllByDnsNameBody{Res: new(types.FindAllByDnsNameResponse)}
|
||||
|
||||
if req.VmSearch {
|
||||
// Find Virtual Machine using DNS name
|
||||
for ref, obj := range Map.objects {
|
||||
vm, ok := obj.(*VirtualMachine)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if vm.Guest.HostName == req.DnsName {
|
||||
body.Res.Returnval = append(body.Res.Returnval, ref)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Find Host System using DNS name
|
||||
for ref, obj := range Map.objects {
|
||||
host, ok := obj.(*HostSystem)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, net := range host.Config.Network.NetStackInstance {
|
||||
if net.DnsConfig.GetHostDnsConfig().HostName == req.DnsName {
|
||||
body.Res.Returnval = append(body.Res.Returnval, ref)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (s *SearchIndex) FindByIp(req *types.FindByIp) soap.HasFault {
|
||||
body := &methods.FindByIpBody{Res: new(types.FindByIpResponse)}
|
||||
|
||||
all := types.FindAllByIp(*req)
|
||||
|
||||
switch r := s.FindAllByIp(&all).(type) {
|
||||
case *methods.FindAllByIpBody:
|
||||
if len(r.Res.Returnval) > 0 {
|
||||
body.Res.Returnval = &r.Res.Returnval[0]
|
||||
}
|
||||
default:
|
||||
// no need until FindAllByIp below returns a Fault
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (s *SearchIndex) FindAllByIp(req *types.FindAllByIp) soap.HasFault {
|
||||
body := &methods.FindAllByIpBody{Res: new(types.FindAllByIpResponse)}
|
||||
|
||||
if req.VmSearch {
|
||||
// Find Virtual Machine using IP
|
||||
for ref, obj := range Map.objects {
|
||||
vm, ok := obj.(*VirtualMachine)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if vm.Guest.IpAddress == req.Ip {
|
||||
body.Res.Returnval = append(body.Res.Returnval, ref)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Find Host System using IP
|
||||
for ref, obj := range Map.objects {
|
||||
host, ok := obj.(*HostSystem)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, net := range host.Config.Network.Vnic {
|
||||
if net.Spec.Ip.IpAddress == req.Ip {
|
||||
body.Res.Returnval = append(body.Res.Returnval, ref)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
|
|
@ -69,6 +69,14 @@ func NewServiceInstance(content types.ServiceContent, folder mo.Folder) *Service
|
|||
NewTaskManager(*s.Content.TaskManager),
|
||||
NewUserDirectory(*s.Content.UserDirectory),
|
||||
NewOptionManager(s.Content.Setting, setting),
|
||||
NewStorageResourceManager(*s.Content.StorageResourceManager),
|
||||
}
|
||||
|
||||
switch content.VStorageObjectManager.Type {
|
||||
case "HostVStorageObjectManager":
|
||||
// TODO: NewHostVStorageObjectManager(*content.VStorageObjectManager)
|
||||
case "VcenterVStorageObjectManager":
|
||||
objects = append(objects, NewVcenterVStorageObjectManager(*content.VStorageObjectManager))
|
||||
}
|
||||
|
||||
if s.Content.CustomFieldsManager != nil {
|
||||
|
|
|
@ -24,7 +24,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/session"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
|
@ -173,6 +172,23 @@ func (s *SessionManager) TerminateSession(ctx *Context, req *types.TerminateSess
|
|||
return body
|
||||
}
|
||||
|
||||
func (s *SessionManager) SessionIsActive(ctx *Context, req *types.SessionIsActive) soap.HasFault {
|
||||
body := new(methods.SessionIsActiveBody)
|
||||
|
||||
if ctx.Map.IsESX() {
|
||||
body.Fault_ = Fault("", new(types.NotImplemented))
|
||||
return body
|
||||
}
|
||||
|
||||
body.Res = new(types.SessionIsActiveResponse)
|
||||
|
||||
if session, exists := s.sessions[req.SessionID]; exists {
|
||||
body.Res.Returnval = session.UserName == req.UserName
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (s *SessionManager) AcquireCloneTicket(ctx *Context, _ *types.AcquireCloneTicket) soap.HasFault {
|
||||
session := *ctx.Session
|
||||
session.Key = uuid.New().String()
|
||||
|
@ -234,7 +250,7 @@ var invalidLogin = Fault("Login failure", new(types.InvalidLogin))
|
|||
type Context struct {
|
||||
req *http.Request
|
||||
res http.ResponseWriter
|
||||
m *SessionManager
|
||||
svc *Service
|
||||
|
||||
context.Context
|
||||
Session *Session
|
||||
|
@ -246,7 +262,7 @@ type Context struct {
|
|||
// mapSession maps an HTTP cookie to a Session.
|
||||
func (c *Context) mapSession() {
|
||||
if cookie, err := c.req.Cookie(soap.SessionCookieName); err == nil {
|
||||
if val, ok := c.m.sessions[cookie.Value]; ok {
|
||||
if val, ok := c.svc.sm.sessions[cookie.Value]; ok {
|
||||
c.SetSession(val, false)
|
||||
}
|
||||
}
|
||||
|
@ -258,7 +274,7 @@ func (c *Context) SetSession(session Session, login bool) {
|
|||
session.IpAddress = strings.Split(c.req.RemoteAddr, ":")[0]
|
||||
session.LastActiveTime = time.Now()
|
||||
|
||||
c.m.sessions[session.Key] = session
|
||||
c.svc.sm.sessions[session.Key] = session
|
||||
c.Session = &session
|
||||
|
||||
if login {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
@ -39,6 +40,7 @@ import (
|
|||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/govmomi/find"
|
||||
"github.com/vmware/govmomi/object"
|
||||
|
@ -65,6 +67,7 @@ type Service struct {
|
|||
client *vim25.Client
|
||||
sm *SessionManager
|
||||
sdk map[string]*Registry
|
||||
delay *DelayConfig
|
||||
|
||||
readAll func(io.Reader) ([]byte, error)
|
||||
|
||||
|
@ -122,7 +125,7 @@ func (s *Service) call(ctx *Context, method *Method) soap.HasFault {
|
|||
|
||||
if session == nil {
|
||||
switch method.Name {
|
||||
case "RetrieveServiceContent", "List", "Login", "LoginByToken", "LoginExtensionByCertificate", "RetrieveProperties", "RetrievePropertiesEx", "CloneSession":
|
||||
case "RetrieveServiceContent", "PbmRetrieveServiceContent", "List", "Login", "LoginByToken", "LoginExtensionByCertificate", "RetrieveProperties", "RetrievePropertiesEx", "CloneSession":
|
||||
// ok for now, TODO: authz
|
||||
default:
|
||||
fault := &types.NotAuthenticated{
|
||||
|
@ -147,7 +150,8 @@ func (s *Service) call(ctx *Context, method *Method) soap.HasFault {
|
|||
return &serverFaultBody{Reason: Fault(msg, fault)}
|
||||
}
|
||||
|
||||
name := method.Name
|
||||
// Lowercase methods can't be accessed outside their package
|
||||
name := strings.Title(method.Name)
|
||||
|
||||
if strings.HasSuffix(name, vTaskSuffix) {
|
||||
// Make golint happy renaming "Foo_Task" -> "FooTask"
|
||||
|
@ -172,6 +176,25 @@ func (s *Service) call(ctx *Context, method *Method) soap.HasFault {
|
|||
}
|
||||
}
|
||||
|
||||
// We have a valid call. Introduce a delay if requested
|
||||
//
|
||||
if s.delay != nil {
|
||||
d := 0
|
||||
if s.delay.Delay > 0 {
|
||||
d = s.delay.Delay
|
||||
}
|
||||
if md, ok := s.delay.MethodDelay[method.Name]; ok {
|
||||
d += md
|
||||
}
|
||||
if s.delay.DelayJitter > 0 {
|
||||
d += int(rand.NormFloat64() * s.delay.DelayJitter * float64(d))
|
||||
}
|
||||
if d > 0 {
|
||||
//fmt.Printf("Delaying method %s %d ms\n", name, d)
|
||||
time.Sleep(time.Duration(d) * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
var args, res []reflect.Value
|
||||
if m.Type().NumIn() == 2 {
|
||||
args = append(args, reflect.ValueOf(ctx))
|
||||
|
@ -230,16 +253,36 @@ type soapEnvelope struct {
|
|||
Body interface{} `xml:"soapenv:Body"`
|
||||
}
|
||||
|
||||
type faultDetail struct {
|
||||
Fault types.AnyType
|
||||
}
|
||||
|
||||
// soapFault is a copy of soap.Fault, with the same changes as soapEnvelope
|
||||
type soapFault struct {
|
||||
XMLName xml.Name `xml:"soapenv:Fault"`
|
||||
Code string `xml:"faultcode"`
|
||||
String string `xml:"faultstring"`
|
||||
Detail struct {
|
||||
Fault types.AnyType `xml:",any,typeattr"`
|
||||
Fault *faultDetail
|
||||
} `xml:"detail"`
|
||||
}
|
||||
|
||||
// MarshalXML renames the start element from "Fault" to "${Type}Fault"
|
||||
func (d *faultDetail) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
kind := reflect.TypeOf(d.Fault).Elem().Name()
|
||||
start.Name.Local = kind + "Fault"
|
||||
start.Attr = append(start.Attr,
|
||||
xml.Attr{
|
||||
Name: xml.Name{Local: "xmlns"},
|
||||
Value: "urn:" + vim25.Namespace,
|
||||
},
|
||||
xml.Attr{
|
||||
Name: xml.Name{Local: "xsi:type"},
|
||||
Value: kind,
|
||||
})
|
||||
return e.EncodeElement(d.Fault, start)
|
||||
}
|
||||
|
||||
// About generates some info about the simulator.
|
||||
func (s *Service) About(w http.ResponseWriter, r *http.Request) {
|
||||
var about struct {
|
||||
|
@ -289,6 +332,16 @@ func (s *Service) About(w http.ResponseWriter, r *http.Request) {
|
|||
_ = enc.Encode(&about)
|
||||
}
|
||||
|
||||
// Handle registers the handler for the given pattern with Service.ServeMux.
|
||||
func (s *Service) Handle(pattern string, handler http.Handler) {
|
||||
s.ServeMux.Handle(pattern, handler)
|
||||
// Not ideal, but avoids having to add yet another registration mechanism
|
||||
// so we can optionally use vapi/simulator internally.
|
||||
if m, ok := handler.(tagManager); ok {
|
||||
s.sdk[vim25.Path].tagManager = m
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterSDK adds an HTTP handler for the Registry's Path and Namespace.
|
||||
func (s *Service) RegisterSDK(r *Registry) {
|
||||
if s.ServeMux == nil {
|
||||
|
@ -321,7 +374,7 @@ func (s *Service) ServeSDK(w http.ResponseWriter, r *http.Request) {
|
|||
ctx := &Context{
|
||||
req: r,
|
||||
res: w,
|
||||
m: s.sm,
|
||||
svc: s,
|
||||
|
||||
Map: s.sdk[r.URL.Path],
|
||||
Context: context.Background(),
|
||||
|
@ -350,7 +403,9 @@ func (s *Service) ServeSDK(w http.ResponseWriter, r *http.Request) {
|
|||
&soapFault{
|
||||
Code: f.Code,
|
||||
String: f.String,
|
||||
Detail: f.Detail,
|
||||
Detail: struct {
|
||||
Fault *faultDetail
|
||||
}{&faultDetail{f.Detail.Fault}},
|
||||
},
|
||||
}
|
||||
} else {
|
||||
|
@ -431,6 +486,9 @@ func (s *Service) ServeDatastore(w http.ResponseWriter, r *http.Request) {
|
|||
// File does not exist, fallthrough to create via PUT logic
|
||||
fallthrough
|
||||
case "PUT":
|
||||
dir := path.Dir(p)
|
||||
_ = os.MkdirAll(dir, 0700)
|
||||
|
||||
f, err := os.Create(p)
|
||||
if err != nil {
|
||||
log.Printf("failed to %s '%s': %s", r.Method, p, err)
|
||||
|
@ -465,6 +523,37 @@ func (*Service) ServiceVersions(w http.ResponseWriter, r *http.Request) {
|
|||
fmt.Fprint(w, versions)
|
||||
}
|
||||
|
||||
// defaultIP returns addr.IP if specified, otherwise attempts to find a non-loopback ipv4 IP
|
||||
func defaultIP(addr *net.TCPAddr) string {
|
||||
if !addr.IP.IsUnspecified() {
|
||||
return addr.IP.String()
|
||||
}
|
||||
|
||||
nics, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return addr.IP.String()
|
||||
}
|
||||
|
||||
for _, nic := range nics {
|
||||
if nic.Name == "docker0" || strings.HasPrefix(nic.Name, "vmnet") {
|
||||
continue
|
||||
}
|
||||
addrs, aerr := nic.Addrs()
|
||||
if aerr != nil {
|
||||
continue
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() {
|
||||
if ip.IP.To4() != nil {
|
||||
return ip.IP.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return addr.IP.String()
|
||||
}
|
||||
|
||||
// NewServer returns an http Server instance for the given service
|
||||
func (s *Service) NewServer() *Server {
|
||||
s.RegisterSDK(Map)
|
||||
|
@ -478,11 +567,12 @@ func (s *Service) NewServer() *Server {
|
|||
// for use in main.go, where Start() blocks, we can still set ServiceHostName
|
||||
ts := httptest.NewUnstartedServer(mux)
|
||||
|
||||
addr := ts.Listener.Addr().(*net.TCPAddr)
|
||||
port := strconv.Itoa(addr.Port)
|
||||
u := &url.URL{
|
||||
Scheme: "http",
|
||||
Host: ts.Listener.Addr().String(),
|
||||
Host: net.JoinHostPort(defaultIP(addr), port),
|
||||
Path: Map.Path,
|
||||
User: url.UserPassword("user", "pass"),
|
||||
}
|
||||
|
||||
// Redirect clients to this http server, rather than HostSystem.Name
|
||||
|
@ -510,7 +600,7 @@ func (s *Service) NewServer() *Server {
|
|||
m.Setting = append(m.Setting,
|
||||
&types.OptionValue{
|
||||
Key: "vcsim.server.url",
|
||||
Value: ts.URL,
|
||||
Value: u.String(),
|
||||
},
|
||||
&types.OptionValue{
|
||||
Key: "vcsim.server.cert",
|
||||
|
@ -518,6 +608,8 @@ func (s *Service) NewServer() *Server {
|
|||
},
|
||||
)
|
||||
|
||||
u.User = url.UserPassword("user", "pass")
|
||||
|
||||
return &Server{
|
||||
Server: ts,
|
||||
URL: u,
|
||||
|
|
|
@ -17,6 +17,11 @@ limitations under the License.
|
|||
package simulator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
|
@ -27,24 +32,110 @@ type VirtualMachineSnapshot struct {
|
|||
mo.VirtualMachineSnapshot
|
||||
}
|
||||
|
||||
func (v *VirtualMachineSnapshot) RemoveSnapshotTask(req *types.RemoveSnapshot_Task) soap.HasFault {
|
||||
func (v *VirtualMachineSnapshot) createSnapshotFiles() types.BaseMethodFault {
|
||||
vm := Map.Get(v.Vm).(*VirtualMachine)
|
||||
|
||||
snapshotDirectory := vm.Config.Files.SnapshotDirectory
|
||||
if snapshotDirectory == "" {
|
||||
snapshotDirectory = vm.Config.Files.VmPathName
|
||||
}
|
||||
|
||||
index := 1
|
||||
for {
|
||||
fileName := fmt.Sprintf("%s-Snapshot%d.vmsn", vm.Name, index)
|
||||
f, err := vm.createFile(snapshotDirectory, fileName, false)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *types.FileAlreadyExists:
|
||||
index++
|
||||
continue
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
_ = f.Close()
|
||||
|
||||
p, _ := parseDatastorePath(snapshotDirectory)
|
||||
vm.useDatastore(p.Datastore)
|
||||
datastorePath := object.DatastorePath{
|
||||
Datastore: p.Datastore,
|
||||
Path: path.Join(p.Path, fileName),
|
||||
}
|
||||
|
||||
dataLayoutKey := vm.addFileLayoutEx(datastorePath, 0)
|
||||
vm.addSnapshotLayout(v.Self, dataLayoutKey)
|
||||
vm.addSnapshotLayoutEx(v.Self, dataLayoutKey, -1)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (v *VirtualMachineSnapshot) removeSnapshotFiles(ctx *Context) types.BaseMethodFault {
|
||||
// TODO: also remove delta disks that were created when snapshot was taken
|
||||
|
||||
vm := Map.Get(v.Vm).(*VirtualMachine)
|
||||
|
||||
for idx, sLayout := range vm.Layout.Snapshot {
|
||||
if sLayout.Key == v.Self {
|
||||
vm.Layout.Snapshot = append(vm.Layout.Snapshot[:idx], vm.Layout.Snapshot[idx+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for idx, sLayoutEx := range vm.LayoutEx.Snapshot {
|
||||
if sLayoutEx.Key == v.Self {
|
||||
for _, file := range vm.LayoutEx.File {
|
||||
if file.Key == sLayoutEx.DataKey || file.Key == sLayoutEx.MemoryKey {
|
||||
p, fault := parseDatastorePath(file.Name)
|
||||
if fault != nil {
|
||||
return fault
|
||||
}
|
||||
|
||||
host := Map.Get(*vm.Runtime.Host).(*HostSystem)
|
||||
datastore := Map.FindByName(p.Datastore, host.Datastore).(*Datastore)
|
||||
dFilePath := path.Join(datastore.Info.GetDatastoreInfo().Url, p.Path)
|
||||
|
||||
_ = os.Remove(dFilePath)
|
||||
}
|
||||
}
|
||||
|
||||
vm.LayoutEx.Snapshot = append(vm.LayoutEx.Snapshot[:idx], vm.LayoutEx.Snapshot[idx+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
vm.RefreshStorageInfo(ctx, nil)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *VirtualMachineSnapshot) RemoveSnapshotTask(ctx *Context, req *types.RemoveSnapshot_Task) soap.HasFault {
|
||||
task := CreateTask(v, "removeSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
Map.Remove(req.This)
|
||||
var changes []types.PropertyChange
|
||||
|
||||
vm := Map.Get(v.Vm).(*VirtualMachine)
|
||||
Map.WithLock(vm, func() {
|
||||
if vm.Snapshot.CurrentSnapshot != nil && *vm.Snapshot.CurrentSnapshot == req.This {
|
||||
parent := findParentSnapshotInTree(vm.Snapshot.RootSnapshotList, req.This)
|
||||
vm.Snapshot.CurrentSnapshot = parent
|
||||
changes = append(changes, types.PropertyChange{Name: "snapshot.currentSnapshot", Val: parent})
|
||||
}
|
||||
|
||||
vm.Snapshot.RootSnapshotList = removeSnapshotInTree(vm.Snapshot.RootSnapshotList, req.This, req.RemoveChildren)
|
||||
rootSnapshots := removeSnapshotInTree(vm.Snapshot.RootSnapshotList, req.This, req.RemoveChildren)
|
||||
changes = append(changes, types.PropertyChange{Name: "snapshot.rootSnapshotList", Val: rootSnapshots})
|
||||
|
||||
if len(vm.Snapshot.RootSnapshotList) == 0 {
|
||||
vm.Snapshot = nil
|
||||
if len(rootSnapshots) == 0 {
|
||||
changes = []types.PropertyChange{
|
||||
{Name: "snapshot", Val: nil},
|
||||
}
|
||||
}
|
||||
|
||||
Map.Get(req.This).(*VirtualMachineSnapshot).removeSnapshotFiles(ctx)
|
||||
|
||||
Map.Update(vm, changes)
|
||||
})
|
||||
|
||||
Map.Remove(req.This)
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
|
@ -59,7 +150,11 @@ func (v *VirtualMachineSnapshot) RevertToSnapshotTask(req *types.RevertToSnapsho
|
|||
task := CreateTask(v, "revertToSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
vm := Map.Get(v.Vm).(*VirtualMachine)
|
||||
|
||||
Map.WithLock(vm, func() { vm.Snapshot.CurrentSnapshot = &v.Self })
|
||||
Map.WithLock(vm, func() {
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "snapshot.currentSnapshot", Val: v.Self},
|
||||
})
|
||||
})
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
|
191
vendor/github.com/vmware/govmomi/simulator/storage_resource_manager.go
generated
vendored
Normal file
191
vendor/github.com/vmware/govmomi/simulator/storage_resource_manager.go
generated
vendored
Normal file
|
@ -0,0 +1,191 @@
|
|||
/*
|
||||
Copyright (c) 2018 VMware, Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package simulator
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
type StorageResourceManager struct {
|
||||
mo.StorageResourceManager
|
||||
}
|
||||
|
||||
func NewStorageResourceManager(ref types.ManagedObjectReference) object.Reference {
|
||||
m := &StorageResourceManager{}
|
||||
m.Self = ref
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *StorageResourceManager) ConfigureStorageDrsForPodTask(req *types.ConfigureStorageDrsForPod_Task) soap.HasFault {
|
||||
task := CreateTask(m, "configureStorageDrsForPod", func(*Task) (types.AnyType, types.BaseMethodFault) {
|
||||
cluster := Map.Get(req.Pod).(*StoragePod)
|
||||
|
||||
if s := req.Spec.PodConfigSpec; s != nil {
|
||||
config := &cluster.PodStorageDrsEntry.StorageDrsConfig.PodConfig
|
||||
|
||||
if s.Enabled != nil {
|
||||
config.Enabled = *s.Enabled
|
||||
}
|
||||
if s.DefaultVmBehavior != "" {
|
||||
config.DefaultVmBehavior = s.DefaultVmBehavior
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
return &methods.ConfigureStorageDrsForPod_TaskBody{
|
||||
Res: &types.ConfigureStorageDrsForPod_TaskResponse{
|
||||
Returnval: task.Run(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *StorageResourceManager) pod(ref *types.ManagedObjectReference) *StoragePod {
|
||||
if ref == nil {
|
||||
return nil
|
||||
}
|
||||
cluster := Map.Get(*ref).(*StoragePod)
|
||||
config := &cluster.PodStorageDrsEntry.StorageDrsConfig.PodConfig
|
||||
|
||||
if !config.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(cluster.ChildEntity) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cluster
|
||||
}
|
||||
|
||||
func (m *StorageResourceManager) RecommendDatastores(req *types.RecommendDatastores) soap.HasFault {
|
||||
spec := req.StorageSpec.PodSelectionSpec
|
||||
body := new(methods.RecommendDatastoresBody)
|
||||
res := new(types.RecommendDatastoresResponse)
|
||||
key := 0
|
||||
invalid := func(prop string) soap.HasFault {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{
|
||||
InvalidProperty: prop,
|
||||
})
|
||||
return body
|
||||
}
|
||||
add := func(cluster *StoragePod, ds types.ManagedObjectReference) {
|
||||
key++
|
||||
res.Returnval.Recommendations = append(res.Returnval.Recommendations, types.ClusterRecommendation{
|
||||
Key: strconv.Itoa(key),
|
||||
Type: "V1",
|
||||
Time: time.Now(),
|
||||
Rating: 1,
|
||||
Reason: "storagePlacement",
|
||||
ReasonText: "Satisfy storage initial placement requests",
|
||||
WarningText: "",
|
||||
WarningDetails: (*types.LocalizableMessage)(nil),
|
||||
Prerequisite: nil,
|
||||
Action: []types.BaseClusterAction{
|
||||
&types.StoragePlacementAction{
|
||||
ClusterAction: types.ClusterAction{
|
||||
Type: "StoragePlacementV1",
|
||||
Target: (*types.ManagedObjectReference)(nil),
|
||||
},
|
||||
Vm: (*types.ManagedObjectReference)(nil),
|
||||
RelocateSpec: types.VirtualMachineRelocateSpec{
|
||||
Service: (*types.ServiceLocator)(nil),
|
||||
Folder: (*types.ManagedObjectReference)(nil),
|
||||
Datastore: &ds,
|
||||
DiskMoveType: "moveAllDiskBackingsAndAllowSharing",
|
||||
Pool: (*types.ManagedObjectReference)(nil),
|
||||
Host: (*types.ManagedObjectReference)(nil),
|
||||
Disk: nil,
|
||||
Transform: "",
|
||||
DeviceChange: nil,
|
||||
Profile: nil,
|
||||
},
|
||||
Destination: ds,
|
||||
SpaceUtilBefore: 5.00297212600708,
|
||||
SpaceDemandBefore: 5.00297212600708,
|
||||
SpaceUtilAfter: 5.16835880279541,
|
||||
SpaceDemandAfter: 5.894514083862305,
|
||||
IoLatencyBefore: 0,
|
||||
},
|
||||
},
|
||||
Target: &cluster.Self,
|
||||
})
|
||||
}
|
||||
|
||||
var devices object.VirtualDeviceList
|
||||
|
||||
switch types.StoragePlacementSpecPlacementType(req.StorageSpec.Type) {
|
||||
case types.StoragePlacementSpecPlacementTypeCreate:
|
||||
if req.StorageSpec.ResourcePool == nil {
|
||||
return invalid("resourcePool")
|
||||
}
|
||||
if req.StorageSpec.ConfigSpec == nil {
|
||||
return invalid("configSpec")
|
||||
}
|
||||
for _, d := range req.StorageSpec.ConfigSpec.DeviceChange {
|
||||
devices = append(devices, d.GetVirtualDeviceConfigSpec().Device)
|
||||
}
|
||||
cluster := m.pod(spec.StoragePod)
|
||||
if cluster == nil {
|
||||
if f := req.StorageSpec.ConfigSpec.Files; f == nil || f.VmPathName == "" {
|
||||
return invalid("configSpec.files")
|
||||
}
|
||||
}
|
||||
case types.StoragePlacementSpecPlacementTypeClone:
|
||||
if req.StorageSpec.Folder == nil {
|
||||
return invalid("folder")
|
||||
}
|
||||
if req.StorageSpec.Vm == nil {
|
||||
return invalid("vm")
|
||||
}
|
||||
if req.StorageSpec.CloneName == "" {
|
||||
return invalid("cloneName")
|
||||
}
|
||||
if req.StorageSpec.CloneSpec == nil {
|
||||
return invalid("cloneSpec")
|
||||
}
|
||||
}
|
||||
|
||||
for _, placement := range spec.InitialVmConfig {
|
||||
cluster := m.pod(&placement.StoragePod)
|
||||
if cluster == nil {
|
||||
return invalid("podSelectionSpec.storagePod")
|
||||
}
|
||||
|
||||
for _, disk := range placement.Disk {
|
||||
if devices.FindByKey(disk.DiskId) == nil {
|
||||
return invalid("podSelectionSpec.initialVmConfig.disk.fileBacking")
|
||||
}
|
||||
}
|
||||
|
||||
for _, ds := range cluster.ChildEntity {
|
||||
add(cluster, ds)
|
||||
}
|
||||
}
|
||||
|
||||
body.Res = res
|
||||
return body
|
||||
}
|
|
@ -20,6 +20,8 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/simulator/esx"
|
||||
"github.com/vmware/govmomi/simulator/vpx"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
@ -34,6 +36,11 @@ type TaskManager struct {
|
|||
func NewTaskManager(ref types.ManagedObjectReference) object.Reference {
|
||||
s := &TaskManager{}
|
||||
s.Self = ref
|
||||
if Map.IsESX() {
|
||||
s.Description = esx.Description
|
||||
} else {
|
||||
s.Description = vpx.Description
|
||||
}
|
||||
Map.AddHandler(s)
|
||||
return s
|
||||
}
|
||||
|
|
|
@ -132,7 +132,7 @@ func (m *VirtualDiskManager) MoveVirtualDiskTask(req *types.MoveVirtualDisk_Task
|
|||
SourceDatacenter: req.SourceDatacenter,
|
||||
DestinationName: dest[i],
|
||||
DestinationDatacenter: req.DestDatacenter,
|
||||
Force: req.Force,
|
||||
Force: req.Force,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
|
@ -168,7 +168,7 @@ func (m *VirtualDiskManager) CopyVirtualDiskTask(req *types.CopyVirtualDisk_Task
|
|||
SourceDatacenter: req.SourceDatacenter,
|
||||
DestinationName: dest[i],
|
||||
DestinationDatacenter: req.DestDatacenter,
|
||||
Force: req.Force,
|
||||
Force: req.Force,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
|
|
|
@ -19,17 +19,18 @@ package simulator
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/simulator/esx"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
|
@ -43,18 +44,21 @@ type VirtualMachine struct {
|
|||
|
||||
log string
|
||||
sid int32
|
||||
run container
|
||||
}
|
||||
|
||||
func NewVirtualMachine(parent types.ManagedObjectReference, spec *types.VirtualMachineConfigSpec) (*VirtualMachine, types.BaseMethodFault) {
|
||||
vm := &VirtualMachine{}
|
||||
vm.Parent = &parent
|
||||
|
||||
Map.Get(parent).(*Folder).putChild(vm)
|
||||
|
||||
if spec.Name == "" {
|
||||
return nil, &types.InvalidVmConfig{Property: "configSpec.name"}
|
||||
return vm, &types.InvalidVmConfig{Property: "configSpec.name"}
|
||||
}
|
||||
|
||||
if spec.Files == nil || spec.Files.VmPathName == "" {
|
||||
return nil, &types.InvalidVmConfig{Property: "configSpec.files.vmPathName"}
|
||||
return vm, &types.InvalidVmConfig{Property: "configSpec.files.vmPathName"}
|
||||
}
|
||||
|
||||
rspec := types.DefaultResourceConfigSpec()
|
||||
|
@ -65,6 +69,10 @@ func NewVirtualMachine(parent types.ManagedObjectReference, spec *types.VirtualM
|
|||
MemoryAllocation: &rspec.MemoryAllocation,
|
||||
CpuAllocation: &rspec.CpuAllocation,
|
||||
}
|
||||
vm.Layout = &types.VirtualMachineFileLayout{}
|
||||
vm.LayoutEx = &types.VirtualMachineFileLayoutEx{
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
vm.Snapshot = nil // intentionally set to nil until a snapshot is created
|
||||
vm.Storage = &types.VirtualMachineStorageInfo{
|
||||
Timestamp: time.Now(),
|
||||
|
@ -92,7 +100,7 @@ func NewVirtualMachine(parent types.ManagedObjectReference, spec *types.VirtualM
|
|||
MemoryMB: 32,
|
||||
Uuid: uuid.New().String(),
|
||||
InstanceUuid: uuid.New().String(),
|
||||
Version: "vmx-11",
|
||||
Version: esx.HardwareVersion,
|
||||
Files: &types.VirtualMachineFileInfo{
|
||||
SnapshotDirectory: dsPath,
|
||||
SuspendDirectory: dsPath,
|
||||
|
@ -105,7 +113,7 @@ func NewVirtualMachine(parent types.ManagedObjectReference, spec *types.VirtualM
|
|||
|
||||
err := vm.configure(&defaults)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return vm, err
|
||||
}
|
||||
|
||||
vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff
|
||||
|
@ -166,6 +174,7 @@ func (vm *VirtualMachine) apply(spec *types.VirtualMachineConfigSpec) {
|
|||
{spec.Files.VmPathName, &vm.Config.Files.VmPathName},
|
||||
{spec.Files.VmPathName, &vm.Summary.Config.VmPathName},
|
||||
{spec.Files.SnapshotDirectory, &vm.Config.Files.SnapshotDirectory},
|
||||
{spec.Files.SuspendDirectory, &vm.Config.Files.SuspendDirectory},
|
||||
{spec.Files.LogDirectory, &vm.Config.Files.LogDirectory},
|
||||
}
|
||||
|
||||
|
@ -259,7 +268,33 @@ func (vm *VirtualMachine) apply(spec *types.VirtualMachineConfigSpec) {
|
|||
vm.Config.Hardware.NumCoresPerSocket = spec.NumCoresPerSocket
|
||||
}
|
||||
|
||||
vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, spec.ExtraConfig...)
|
||||
var changes []types.PropertyChange
|
||||
for _, c := range spec.ExtraConfig {
|
||||
val := c.GetOptionValue()
|
||||
key := strings.TrimPrefix(val.Key, "SET.")
|
||||
if key == val.Key {
|
||||
vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, c)
|
||||
continue
|
||||
}
|
||||
changes = append(changes, types.PropertyChange{Name: key, Val: val.Value})
|
||||
|
||||
switch key {
|
||||
case "guest.ipAddress":
|
||||
ip := val.Value.(string)
|
||||
vm.Guest.Net[0].IpAddress = []string{ip}
|
||||
changes = append(changes,
|
||||
types.PropertyChange{Name: "summary." + key, Val: ip},
|
||||
types.PropertyChange{Name: "guest.net", Val: vm.Guest.Net},
|
||||
)
|
||||
case "guest.hostName":
|
||||
changes = append(changes,
|
||||
types.PropertyChange{Name: "summary." + key, Val: val.Value},
|
||||
)
|
||||
}
|
||||
}
|
||||
if len(changes) != 0 {
|
||||
Map.Update(vm, changes)
|
||||
}
|
||||
|
||||
vm.Config.Modified = time.Now()
|
||||
}
|
||||
|
@ -298,6 +333,400 @@ func (vm *VirtualMachine) configure(spec *types.VirtualMachineConfigSpec) types.
|
|||
return vm.configureDevices(spec)
|
||||
}
|
||||
|
||||
func getVMFileType(fileName string) types.VirtualMachineFileLayoutExFileType {
|
||||
var fileType types.VirtualMachineFileLayoutExFileType
|
||||
|
||||
fileExt := path.Ext(fileName)
|
||||
fileNameNoExt := strings.TrimSuffix(fileName, fileExt)
|
||||
|
||||
switch fileExt {
|
||||
case ".vmx":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeConfig
|
||||
case ".core":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeCore
|
||||
case ".vmdk":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeDiskDescriptor
|
||||
if strings.HasSuffix(fileNameNoExt, "-digest") {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeDigestDescriptor
|
||||
}
|
||||
|
||||
extentSuffixes := []string{"-flat", "-delta", "-s", "-rdm", "-rdmp"}
|
||||
for _, suffix := range extentSuffixes {
|
||||
if strings.HasSuffix(fileNameNoExt, suffix) {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeDiskExtent
|
||||
} else if strings.HasSuffix(fileNameNoExt, "-digest"+suffix) {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeDigestExtent
|
||||
}
|
||||
}
|
||||
case ".psf":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeDiskReplicationState
|
||||
case ".vmxf":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeExtendedConfig
|
||||
case ".vmft":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeFtMetadata
|
||||
case ".log":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeLog
|
||||
case ".nvram":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeNvram
|
||||
case ".png", ".bmp":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeScreenshot
|
||||
case ".vmsn":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotData
|
||||
case ".vmsd":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotList
|
||||
case ".xml":
|
||||
if strings.HasSuffix(fileNameNoExt, "-aux") {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotManifestList
|
||||
}
|
||||
case ".stat":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeStat
|
||||
case ".vmss":
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeSuspend
|
||||
case ".vmem":
|
||||
if strings.Contains(fileNameNoExt, "Snapshot") {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotMemory
|
||||
} else {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeSuspendMemory
|
||||
}
|
||||
case ".vswp":
|
||||
if strings.HasPrefix(fileNameNoExt, "vmx-") {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeUwswap
|
||||
} else {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeSwap
|
||||
}
|
||||
case "":
|
||||
if strings.HasPrefix(fileNameNoExt, "imcf-") {
|
||||
fileType = types.VirtualMachineFileLayoutExFileTypeGuestCustomization
|
||||
}
|
||||
}
|
||||
|
||||
return fileType
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) addFileLayoutEx(datastorePath object.DatastorePath, fileSize int64) int32 {
|
||||
var newKey int32
|
||||
for _, layoutFile := range vm.LayoutEx.File {
|
||||
if layoutFile.Name == datastorePath.String() {
|
||||
return layoutFile.Key
|
||||
}
|
||||
|
||||
if layoutFile.Key >= newKey {
|
||||
newKey = layoutFile.Key + 1
|
||||
}
|
||||
}
|
||||
|
||||
fileType := getVMFileType(filepath.Base(datastorePath.Path))
|
||||
|
||||
switch fileType {
|
||||
case types.VirtualMachineFileLayoutExFileTypeNvram, types.VirtualMachineFileLayoutExFileTypeSnapshotList:
|
||||
vm.addConfigLayout(datastorePath.Path)
|
||||
case types.VirtualMachineFileLayoutExFileTypeLog:
|
||||
vm.addLogLayout(datastorePath.Path)
|
||||
case types.VirtualMachineFileLayoutExFileTypeSwap:
|
||||
vm.addSwapLayout(datastorePath.String())
|
||||
}
|
||||
|
||||
vm.LayoutEx.File = append(vm.LayoutEx.File, types.VirtualMachineFileLayoutExFileInfo{
|
||||
Accessible: types.NewBool(true),
|
||||
BackingObjectId: "",
|
||||
Key: newKey,
|
||||
Name: datastorePath.String(),
|
||||
Size: fileSize,
|
||||
Type: string(fileType),
|
||||
UniqueSize: fileSize,
|
||||
})
|
||||
|
||||
vm.LayoutEx.Timestamp = time.Now()
|
||||
|
||||
vm.updateStorage()
|
||||
|
||||
return newKey
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) addConfigLayout(name string) {
|
||||
for _, config := range vm.Layout.ConfigFile {
|
||||
if config == name {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vm.Layout.ConfigFile = append(vm.Layout.ConfigFile, name)
|
||||
|
||||
vm.updateStorage()
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) addLogLayout(name string) {
|
||||
for _, log := range vm.Layout.LogFile {
|
||||
if log == name {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vm.Layout.LogFile = append(vm.Layout.LogFile, name)
|
||||
|
||||
vm.updateStorage()
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) addSwapLayout(name string) {
|
||||
vm.Layout.SwapFile = name
|
||||
|
||||
vm.updateStorage()
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) addSnapshotLayout(snapshot types.ManagedObjectReference, dataKey int32) {
|
||||
for _, snapshotLayout := range vm.Layout.Snapshot {
|
||||
if snapshotLayout.Key == snapshot {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var snapshotFiles []string
|
||||
for _, file := range vm.LayoutEx.File {
|
||||
if file.Key == dataKey || file.Type == "diskDescriptor" {
|
||||
snapshotFiles = append(snapshotFiles, file.Name)
|
||||
}
|
||||
}
|
||||
|
||||
vm.Layout.Snapshot = append(vm.Layout.Snapshot, types.VirtualMachineFileLayoutSnapshotLayout{
|
||||
Key: snapshot,
|
||||
SnapshotFile: snapshotFiles,
|
||||
})
|
||||
|
||||
vm.updateStorage()
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) addSnapshotLayoutEx(snapshot types.ManagedObjectReference, dataKey int32, memoryKey int32) {
|
||||
for _, snapshotLayoutEx := range vm.LayoutEx.Snapshot {
|
||||
if snapshotLayoutEx.Key == snapshot {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vm.LayoutEx.Snapshot = append(vm.LayoutEx.Snapshot, types.VirtualMachineFileLayoutExSnapshotLayout{
|
||||
DataKey: dataKey,
|
||||
Disk: vm.LayoutEx.Disk,
|
||||
Key: snapshot,
|
||||
MemoryKey: memoryKey,
|
||||
})
|
||||
|
||||
vm.LayoutEx.Timestamp = time.Now()
|
||||
|
||||
vm.updateStorage()
|
||||
}
|
||||
|
||||
// Updates both vm.Layout.Disk and vm.LayoutEx.Disk
|
||||
func (vm *VirtualMachine) updateDiskLayouts() types.BaseMethodFault {
|
||||
var disksLayout []types.VirtualMachineFileLayoutDiskLayout
|
||||
var disksLayoutEx []types.VirtualMachineFileLayoutExDiskLayout
|
||||
|
||||
disks := object.VirtualDeviceList(vm.Config.Hardware.Device).SelectByType((*types.VirtualDisk)(nil))
|
||||
for _, disk := range disks {
|
||||
disk := disk.(*types.VirtualDisk)
|
||||
diskBacking := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
|
||||
|
||||
diskLayout := &types.VirtualMachineFileLayoutDiskLayout{Key: disk.Key}
|
||||
diskLayoutEx := &types.VirtualMachineFileLayoutExDiskLayout{Key: disk.Key}
|
||||
|
||||
// Iterate through disk and its parents
|
||||
for {
|
||||
dFileName := diskBacking.GetVirtualDeviceFileBackingInfo().FileName
|
||||
|
||||
var fileKeys []int32
|
||||
|
||||
dm := Map.VirtualDiskManager()
|
||||
// Add disk descriptor and extent files
|
||||
for _, diskName := range dm.names(dFileName) {
|
||||
// get full path including datastore location
|
||||
p, fault := parseDatastorePath(diskName)
|
||||
if fault != nil {
|
||||
return fault
|
||||
}
|
||||
|
||||
datastore := vm.useDatastore(p.Datastore)
|
||||
dFilePath := path.Join(datastore.Info.GetDatastoreInfo().Url, p.Path)
|
||||
|
||||
var fileSize int64
|
||||
// If file can not be opened - fileSize will be 0
|
||||
if dFileInfo, err := os.Stat(dFilePath); err == nil {
|
||||
fileSize = dFileInfo.Size()
|
||||
}
|
||||
|
||||
diskKey := vm.addFileLayoutEx(*p, fileSize)
|
||||
fileKeys = append(fileKeys, diskKey)
|
||||
}
|
||||
|
||||
diskLayout.DiskFile = append(diskLayout.DiskFile, dFileName)
|
||||
diskLayoutEx.Chain = append(diskLayoutEx.Chain, types.VirtualMachineFileLayoutExDiskUnit{
|
||||
FileKey: fileKeys,
|
||||
})
|
||||
|
||||
if parent := diskBacking.Parent; parent != nil {
|
||||
diskBacking = parent
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
disksLayout = append(disksLayout, *diskLayout)
|
||||
disksLayoutEx = append(disksLayoutEx, *diskLayoutEx)
|
||||
}
|
||||
|
||||
vm.Layout.Disk = disksLayout
|
||||
|
||||
vm.LayoutEx.Disk = disksLayoutEx
|
||||
vm.LayoutEx.Timestamp = time.Now()
|
||||
|
||||
vm.updateStorage()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) updateStorage() types.BaseMethodFault {
|
||||
// Committed - sum of Size for each file in vm.LayoutEx.File
|
||||
// Unshared - sum of Size for each disk (.vmdk) in vm.LayoutEx.File
|
||||
// Uncommitted - disk capacity minus disk usage (only currently used disk)
|
||||
var datastoresUsage []types.VirtualMachineUsageOnDatastore
|
||||
|
||||
disks := object.VirtualDeviceList(vm.Config.Hardware.Device).SelectByType((*types.VirtualDisk)(nil))
|
||||
|
||||
for _, file := range vm.LayoutEx.File {
|
||||
p, fault := parseDatastorePath(file.Name)
|
||||
if fault != nil {
|
||||
return fault
|
||||
}
|
||||
|
||||
datastore := vm.useDatastore(p.Datastore)
|
||||
dsUsage := &types.VirtualMachineUsageOnDatastore{
|
||||
Datastore: datastore.Self,
|
||||
}
|
||||
|
||||
for idx, usage := range datastoresUsage {
|
||||
if usage.Datastore == datastore.Self {
|
||||
datastoresUsage = append(datastoresUsage[:idx], datastoresUsage[idx+1:]...)
|
||||
dsUsage = &usage
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
dsUsage.Committed = file.Size
|
||||
|
||||
if path.Ext(file.Name) == ".vmdk" {
|
||||
dsUsage.Unshared = file.Size
|
||||
}
|
||||
|
||||
for _, disk := range disks {
|
||||
disk := disk.(*types.VirtualDisk)
|
||||
backing := disk.Backing.(types.BaseVirtualDeviceFileBackingInfo).GetVirtualDeviceFileBackingInfo()
|
||||
|
||||
if backing.FileName == file.Name {
|
||||
dsUsage.Uncommitted = disk.CapacityInBytes
|
||||
}
|
||||
}
|
||||
|
||||
datastoresUsage = append(datastoresUsage, *dsUsage)
|
||||
}
|
||||
|
||||
vm.Storage.PerDatastoreUsage = datastoresUsage
|
||||
vm.Storage.Timestamp = time.Now()
|
||||
|
||||
storageSummary := &types.VirtualMachineStorageSummary{
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
for _, usage := range datastoresUsage {
|
||||
storageSummary.Committed += usage.Committed
|
||||
storageSummary.Uncommitted += usage.Uncommitted
|
||||
storageSummary.Unshared += usage.Unshared
|
||||
}
|
||||
|
||||
vm.Summary.Storage = storageSummary
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) RefreshStorageInfo(ctx *Context, req *types.RefreshStorageInfo) soap.HasFault {
|
||||
body := new(methods.RefreshStorageInfoBody)
|
||||
|
||||
if vm.Runtime.Host == nil {
|
||||
// VM not fully created
|
||||
return body
|
||||
}
|
||||
|
||||
// Validate that all files in vm.LayoutEx.File can still be found
|
||||
for idx := len(vm.LayoutEx.File) - 1; idx >= 0; idx-- {
|
||||
file := vm.LayoutEx.File[idx]
|
||||
|
||||
p, fault := parseDatastorePath(file.Name)
|
||||
if fault != nil {
|
||||
body.Fault_ = Fault("", fault)
|
||||
return body
|
||||
}
|
||||
|
||||
if _, err := os.Stat(p.String()); err != nil {
|
||||
vm.LayoutEx.File = append(vm.LayoutEx.File[:idx], vm.LayoutEx.File[idx+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
// Directories will be used to locate VM files.
|
||||
// Does not include information about virtual disk file locations.
|
||||
locations := []string{
|
||||
vm.Config.Files.VmPathName,
|
||||
vm.Config.Files.SnapshotDirectory,
|
||||
vm.Config.Files.LogDirectory,
|
||||
vm.Config.Files.SuspendDirectory,
|
||||
vm.Config.Files.FtMetadataDirectory,
|
||||
}
|
||||
|
||||
for _, directory := range locations {
|
||||
if directory == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
p, fault := parseDatastorePath(directory)
|
||||
if fault != nil {
|
||||
body.Fault_ = Fault("", fault)
|
||||
return body
|
||||
}
|
||||
|
||||
datastore := vm.useDatastore(p.Datastore)
|
||||
directory := path.Join(datastore.Info.GetDatastoreInfo().Url, p.Path)
|
||||
|
||||
if path.Ext(p.Path) == ".vmx" {
|
||||
directory = path.Dir(directory) // vm.Config.Files.VmPathName can be a directory or full path to .vmx
|
||||
}
|
||||
|
||||
if _, err := os.Stat(directory); err != nil {
|
||||
// Can not access the directory
|
||||
continue
|
||||
}
|
||||
|
||||
files, err := ioutil.ReadDir(directory)
|
||||
if err != nil {
|
||||
body.Fault_ = soap.ToSoapFault(err)
|
||||
return body
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
datastorePath := object.DatastorePath{
|
||||
Datastore: p.Datastore,
|
||||
Path: strings.TrimPrefix(file.Name(), datastore.Info.GetDatastoreInfo().Url),
|
||||
}
|
||||
|
||||
vm.addFileLayoutEx(datastorePath, file.Size())
|
||||
}
|
||||
}
|
||||
|
||||
fault := vm.updateDiskLayouts()
|
||||
if fault != nil {
|
||||
body.Fault_ = Fault("", fault)
|
||||
return body
|
||||
}
|
||||
|
||||
vm.LayoutEx.Timestamp = time.Now()
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) useDatastore(name string) *Datastore {
|
||||
host := Map.Get(*vm.Runtime.Host).(*HostSystem)
|
||||
|
||||
|
@ -321,8 +750,8 @@ func (vm *VirtualMachine) createFile(spec string, name string, register bool) (*
|
|||
file := path.Join(ds.Info.GetDatastoreInfo().Url, p.Path)
|
||||
|
||||
if name != "" {
|
||||
if path.Ext(file) != "" {
|
||||
file = path.Dir(file)
|
||||
if path.Ext(p.Path) == ".vmx" {
|
||||
file = path.Dir(file) // vm.Config.Files.VmPathName can be a directory or full path to .vmx
|
||||
}
|
||||
|
||||
file = path.Join(file, name)
|
||||
|
@ -380,6 +809,15 @@ func (vm *VirtualMachine) logPrintf(format string, v ...interface{}) {
|
|||
func (vm *VirtualMachine) create(spec *types.VirtualMachineConfigSpec, register bool) types.BaseMethodFault {
|
||||
vm.apply(spec)
|
||||
|
||||
if spec.Version != "" {
|
||||
v := strings.TrimPrefix(spec.Version, "vmx-")
|
||||
_, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
log.Printf("unsupported hardware version: %s", spec.Version)
|
||||
return new(types.NotSupported)
|
||||
}
|
||||
}
|
||||
|
||||
files := []struct {
|
||||
spec string
|
||||
name string
|
||||
|
@ -456,20 +894,18 @@ func (vm *VirtualMachine) configureDevice(devices object.VirtualDeviceList, spec
|
|||
d := device.GetVirtualDevice()
|
||||
var controller types.BaseVirtualController
|
||||
|
||||
if d.Key < 0 {
|
||||
// Choose a unique key
|
||||
if d.Key == -1 {
|
||||
d.Key = devices.NewKey()
|
||||
}
|
||||
|
||||
if d.Key <= 0 {
|
||||
// Keys can't be negative; Key 0 is reserved
|
||||
d.Key = devices.NewKey()
|
||||
d.Key *= -1
|
||||
}
|
||||
|
||||
for {
|
||||
if devices.FindByKey(d.Key) == nil {
|
||||
break
|
||||
}
|
||||
d.Key++
|
||||
// Choose a unique key
|
||||
for {
|
||||
if devices.FindByKey(d.Key) == nil {
|
||||
break
|
||||
}
|
||||
d.Key++
|
||||
}
|
||||
|
||||
label := devices.Name(device)
|
||||
|
@ -481,10 +917,12 @@ func (vm *VirtualMachine) configureDevice(devices object.VirtualDeviceList, spec
|
|||
case types.BaseVirtualEthernetCard:
|
||||
controller = devices.PickController((*types.VirtualPCIController)(nil))
|
||||
var net types.ManagedObjectReference
|
||||
var name string
|
||||
|
||||
switch b := d.Backing.(type) {
|
||||
case *types.VirtualEthernetCardNetworkBackingInfo:
|
||||
summary = b.DeviceName
|
||||
name = b.DeviceName
|
||||
summary = name
|
||||
net = Map.FindByName(b.DeviceName, dc.Network).Reference()
|
||||
b.Network = &net
|
||||
case *types.VirtualEthernetCardDistributedVirtualPortBackingInfo:
|
||||
|
@ -493,20 +931,35 @@ func (vm *VirtualMachine) configureDevice(devices object.VirtualDeviceList, spec
|
|||
net.Value = b.Port.PortgroupKey
|
||||
}
|
||||
|
||||
vm.Network = append(vm.Network, net)
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "summary.config.numEthernetCards", Val: vm.Summary.Config.NumEthernetCards + 1},
|
||||
{Name: "network", Val: append(vm.Network, net)},
|
||||
})
|
||||
|
||||
c := x.GetVirtualEthernetCard()
|
||||
if c.MacAddress == "" {
|
||||
c.MacAddress = vm.generateMAC()
|
||||
}
|
||||
|
||||
if spec.Operation == types.VirtualDeviceConfigSpecOperationAdd {
|
||||
vm.Guest.Net = append(vm.Guest.Net, types.GuestNicInfo{
|
||||
Network: name,
|
||||
IpAddress: nil,
|
||||
MacAddress: c.MacAddress,
|
||||
Connected: true,
|
||||
DeviceConfigId: c.Key,
|
||||
})
|
||||
}
|
||||
case *types.VirtualDisk:
|
||||
summary = fmt.Sprintf("%s KB", numberToString(x.CapacityInKB, ','))
|
||||
switch b := d.Backing.(type) {
|
||||
case types.BaseVirtualDeviceFileBackingInfo:
|
||||
info := b.GetVirtualDeviceFileBackingInfo()
|
||||
var path object.DatastorePath
|
||||
path.FromString(info.FileName)
|
||||
|
||||
if info.FileName == "" {
|
||||
filename, err := vm.genVmdkPath()
|
||||
if path.Path == "" {
|
||||
filename, err := vm.genVmdkPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -522,6 +975,10 @@ func (vm *VirtualMachine) configureDevice(devices object.VirtualDeviceList, spec
|
|||
return err
|
||||
}
|
||||
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "summary.config.numVirtualDisks", Val: vm.Summary.Config.NumVirtualDisks + 1},
|
||||
})
|
||||
|
||||
p, _ := parseDatastorePath(info.FileName)
|
||||
|
||||
host := Map.Get(*vm.Runtime.Host).(*HostSystem)
|
||||
|
@ -535,6 +992,8 @@ func (vm *VirtualMachine) configureDevice(devices object.VirtualDeviceList, spec
|
|||
// XXX: compare disk size and free space until windows stat is supported
|
||||
ds.Summary.FreeSpace -= getDiskSize(x)
|
||||
ds.Info.GetDatastoreInfo().FreeSpace = ds.Summary.FreeSpace
|
||||
|
||||
vm.updateDiskLayouts()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -591,6 +1050,11 @@ func (vm *VirtualMachine) removeDevice(devices object.VirtualDeviceList, spec *t
|
|||
})
|
||||
}
|
||||
}
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "summary.config.numVirtualDisks", Val: vm.Summary.Config.NumVirtualDisks - 1},
|
||||
})
|
||||
|
||||
vm.updateDiskLayouts()
|
||||
case types.BaseVirtualEthernetCard:
|
||||
var net types.ManagedObjectReference
|
||||
|
||||
|
@ -602,7 +1066,12 @@ func (vm *VirtualMachine) removeDevice(devices object.VirtualDeviceList, spec *t
|
|||
net.Value = b.Port.PortgroupKey
|
||||
}
|
||||
|
||||
RemoveReference(&vm.Network, net)
|
||||
networks := vm.Network
|
||||
RemoveReference(&networks, net)
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "summary.config.numEthernetCards", Val: vm.Summary.Config.NumEthernetCards - 1},
|
||||
{Name: "network", Val: networks},
|
||||
})
|
||||
}
|
||||
|
||||
break
|
||||
|
@ -611,9 +1080,16 @@ func (vm *VirtualMachine) removeDevice(devices object.VirtualDeviceList, spec *t
|
|||
return devices
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) genVmdkPath() (string, types.BaseMethodFault) {
|
||||
vmdir := path.Dir(vm.Config.Files.VmPathName)
|
||||
|
||||
func (vm *VirtualMachine) genVmdkPath(p object.DatastorePath) (string, types.BaseMethodFault) {
|
||||
if p.Datastore == "" {
|
||||
p.FromString(vm.Config.Files.VmPathName)
|
||||
}
|
||||
if p.Path == "" {
|
||||
p.Path = vm.Config.Name
|
||||
} else {
|
||||
p.Path = path.Dir(p.Path)
|
||||
}
|
||||
vmdir := p.String()
|
||||
index := 0
|
||||
for {
|
||||
var filename string
|
||||
|
@ -651,13 +1127,22 @@ func (vm *VirtualMachine) configureDevices(spec *types.VirtualMachineConfigSpec)
|
|||
|
||||
switch dspec.Operation {
|
||||
case types.VirtualDeviceConfigSpecOperationAdd:
|
||||
if devices.FindByKey(device.Key) != nil {
|
||||
if vm.Self.Value != "" { // moid isn't set until CreateVM is done
|
||||
return invalid
|
||||
if devices.FindByKey(device.Key) != nil && device.ControllerKey == 0 {
|
||||
// Note: real ESX does not allow adding base controllers (ControllerKey = 0)
|
||||
// after VM is created (returns success but device is not added).
|
||||
continue
|
||||
} else if device.UnitNumber != nil && devices.SelectByType(dspec.Device).Select(func(d types.BaseVirtualDevice) bool {
|
||||
base := d.GetVirtualDevice()
|
||||
if base.UnitNumber != nil {
|
||||
if base.ControllerKey != device.ControllerKey {
|
||||
return false
|
||||
}
|
||||
return *base.UnitNumber == *device.UnitNumber
|
||||
}
|
||||
|
||||
// In this case, the CreateVM() spec included one of the default devices
|
||||
devices = vm.removeDevice(devices, dspec)
|
||||
return false
|
||||
}) != nil {
|
||||
// UnitNumber for this device type is taken
|
||||
return invalid
|
||||
}
|
||||
|
||||
err := vm.configureDevice(devices, dspec)
|
||||
|
@ -686,7 +1171,11 @@ func (vm *VirtualMachine) configureDevices(spec *types.VirtualMachineConfigSpec)
|
|||
}
|
||||
}
|
||||
|
||||
vm.Config.Hardware.Device = []types.BaseVirtualDevice(devices)
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "config.hardware.device", Val: []types.BaseVirtualDevice(devices)},
|
||||
})
|
||||
|
||||
vm.updateDiskLayouts()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -717,16 +1206,19 @@ func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
|
|||
event := c.event()
|
||||
switch c.state {
|
||||
case types.VirtualMachinePowerStatePoweredOn:
|
||||
c.run.start(c.VirtualMachine)
|
||||
c.ctx.postEvent(
|
||||
&types.VmStartingEvent{VmEvent: event},
|
||||
&types.VmPoweredOnEvent{VmEvent: event},
|
||||
)
|
||||
case types.VirtualMachinePowerStatePoweredOff:
|
||||
c.run.stop(c.VirtualMachine)
|
||||
c.ctx.postEvent(
|
||||
&types.VmStoppingEvent{VmEvent: event},
|
||||
&types.VmPoweredOffEvent{VmEvent: event},
|
||||
)
|
||||
case types.VirtualMachinePowerStateSuspended:
|
||||
c.run.pause(c.VirtualMachine)
|
||||
c.ctx.postEvent(
|
||||
&types.VmSuspendingEvent{VmEvent: event},
|
||||
&types.VmSuspendedEvent{VmEvent: event},
|
||||
|
@ -823,6 +1315,25 @@ func (vm *VirtualMachine) ReconfigVMTask(ctx *Context, req *types.ReconfigVM_Tas
|
|||
}
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) UpgradeVMTask(req *types.UpgradeVM_Task) soap.HasFault {
|
||||
body := &methods.UpgradeVM_TaskBody{}
|
||||
|
||||
task := CreateTask(vm, "upgradeVm", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
if vm.Config.Version != esx.HardwareVersion {
|
||||
Map.Update(vm, []types.PropertyChange{{
|
||||
Name: "config.version", Val: esx.HardwareVersion,
|
||||
}})
|
||||
}
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
body.Res = &types.UpgradeVM_TaskResponse{
|
||||
Returnval: task.Run(),
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.HasFault {
|
||||
task := CreateTask(vm, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
r := vm.UnregisterVM(ctx, &types.UnregisterVM{
|
||||
|
@ -848,6 +1359,8 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa
|
|||
Datacenter: &dc,
|
||||
})
|
||||
|
||||
vm.run.remove(vm)
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
|
@ -858,6 +1371,10 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa
|
|||
}
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) SetCustomValue(ctx *Context, req *types.SetCustomValue) soap.HasFault {
|
||||
return SetCustomValue(ctx, req)
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) UnregisterVM(ctx *Context, c *types.UnregisterVM) soap.HasFault {
|
||||
r := &methods.UnregisterVMBody{}
|
||||
|
||||
|
@ -917,21 +1434,24 @@ func (vm *VirtualMachine) CloneVMTask(ctx *Context, req *types.CloneVM_Task) soa
|
|||
},
|
||||
}
|
||||
|
||||
for _, device := range vm.Config.Hardware.Device {
|
||||
defaultDevices := object.VirtualDeviceList(esx.VirtualDevice)
|
||||
devices := vm.Config.Hardware.Device
|
||||
for _, device := range devices {
|
||||
var fop types.VirtualDeviceConfigSpecFileOperation
|
||||
|
||||
switch device.(type) {
|
||||
if defaultDevices.Find(object.VirtualDeviceList(devices).Name(device)) != nil {
|
||||
// Default devices are added during CreateVMTask
|
||||
continue
|
||||
}
|
||||
|
||||
switch disk := device.(type) {
|
||||
case *types.VirtualDisk:
|
||||
// TODO: consider VirtualMachineCloneSpec.DiskMoveType
|
||||
fop = types.VirtualDeviceConfigSpecFileOperationCreate
|
||||
device = &types.VirtualDisk{
|
||||
VirtualDevice: types.VirtualDevice{
|
||||
Backing: &types.VirtualDiskFlatVer2BackingInfo{
|
||||
DiskMode: string(types.VirtualDiskModePersistent),
|
||||
// Leave FileName empty so CreateVM will just create a new one under VmPathName
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Leave FileName empty so CreateVM will just create a new one under VmPathName
|
||||
disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo).FileName = ""
|
||||
disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo).Parent = nil
|
||||
}
|
||||
|
||||
config.DeviceChange = append(config.DeviceChange, &types.VirtualDeviceConfigSpec{
|
||||
|
@ -956,6 +1476,9 @@ func (vm *VirtualMachine) CloneVMTask(ctx *Context, req *types.CloneVM_Task) soa
|
|||
ref := ctask.Info.Result.(types.ManagedObjectReference)
|
||||
clone := Map.Get(ref).(*VirtualMachine)
|
||||
clone.configureDevices(&types.VirtualMachineConfigSpec{DeviceChange: req.Spec.Location.DeviceChange})
|
||||
if req.Spec.Config != nil && req.Spec.Config.DeviceChange != nil {
|
||||
clone.configureDevices(&types.VirtualMachineConfigSpec{DeviceChange: req.Spec.Config.DeviceChange})
|
||||
}
|
||||
|
||||
ctx.postEvent(&types.VmClonedEvent{
|
||||
VmCloneEvent: types.VmCloneEvent{VmEvent: clone.event()},
|
||||
|
@ -980,7 +1503,7 @@ func (vm *VirtualMachine) RelocateVMTask(req *types.RelocateVM_Task) soap.HasFau
|
|||
ds := Map.Get(*ref).(*Datastore)
|
||||
Map.RemoveReference(ds, &ds.Vm, *ref)
|
||||
|
||||
// TODO: migrate vm.Config.Files (and vm.Summary.Config.VmPathName)
|
||||
// TODO: migrate vm.Config.Files, vm.Summary.Config.VmPathName, vm.Layout and vm.LayoutEx
|
||||
|
||||
changes = append(changes, types.PropertyChange{Name: "datastore", Val: []types.ManagedObjectReference{*ref}})
|
||||
}
|
||||
|
@ -989,7 +1512,7 @@ func (vm *VirtualMachine) RelocateVMTask(req *types.RelocateVM_Task) soap.HasFau
|
|||
pool := Map.Get(*ref).(*ResourcePool)
|
||||
Map.RemoveReference(pool, &pool.Vm, *ref)
|
||||
|
||||
changes = append(changes, types.PropertyChange{Name: "resourcePool", Val: *ref})
|
||||
changes = append(changes, types.PropertyChange{Name: "resourcePool", Val: ref})
|
||||
}
|
||||
|
||||
if ref := req.Spec.Host; ref != nil {
|
||||
|
@ -997,8 +1520,8 @@ func (vm *VirtualMachine) RelocateVMTask(req *types.RelocateVM_Task) soap.HasFau
|
|||
Map.RemoveReference(host, &host.Vm, *ref)
|
||||
|
||||
changes = append(changes,
|
||||
types.PropertyChange{Name: "runtime.host", Val: *ref},
|
||||
types.PropertyChange{Name: "summary.runtime.host", Val: *ref},
|
||||
types.PropertyChange{Name: "runtime.host", Val: ref},
|
||||
types.PropertyChange{Name: "summary.runtime.host", Val: ref},
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -1016,6 +1539,8 @@ func (vm *VirtualMachine) RelocateVMTask(req *types.RelocateVM_Task) soap.HasFau
|
|||
|
||||
func (vm *VirtualMachine) CreateSnapshotTask(req *types.CreateSnapshot_Task) soap.HasFault {
|
||||
task := CreateTask(vm, "createSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
var changes []types.PropertyChange
|
||||
|
||||
if vm.Snapshot == nil {
|
||||
vm.Snapshot = &types.VirtualMachineSnapshotInfo{}
|
||||
}
|
||||
|
@ -1047,10 +1572,16 @@ func (vm *VirtualMachine) CreateSnapshotTask(req *types.CreateSnapshot_Task) soa
|
|||
ss := findSnapshotInTree(vm.Snapshot.RootSnapshotList, *cur)
|
||||
ss.ChildSnapshotList = append(ss.ChildSnapshotList, treeItem)
|
||||
} else {
|
||||
vm.Snapshot.RootSnapshotList = append(vm.Snapshot.RootSnapshotList, treeItem)
|
||||
changes = append(changes, types.PropertyChange{
|
||||
Name: "snapshot.rootSnapshotList",
|
||||
Val: append(vm.Snapshot.RootSnapshotList, treeItem),
|
||||
})
|
||||
}
|
||||
|
||||
vm.Snapshot.CurrentSnapshot = &snapshot.Self
|
||||
snapshot.createSnapshotFiles()
|
||||
|
||||
changes = append(changes, types.PropertyChange{Name: "snapshot.currentSnapshot", Val: snapshot.Self})
|
||||
Map.Update(vm, changes)
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
@ -1082,7 +1613,7 @@ func (vm *VirtualMachine) RevertToCurrentSnapshotTask(req *types.RevertToCurrent
|
|||
return body
|
||||
}
|
||||
|
||||
func (vm *VirtualMachine) RemoveAllSnapshotsTask(req *types.RemoveAllSnapshots_Task) soap.HasFault {
|
||||
func (vm *VirtualMachine) RemoveAllSnapshotsTask(ctx *Context, req *types.RemoveAllSnapshots_Task) soap.HasFault {
|
||||
task := CreateTask(vm, "RemoveAllSnapshots", func(t *Task) (types.AnyType, types.BaseMethodFault) {
|
||||
if vm.Snapshot == nil {
|
||||
return nil, nil
|
||||
|
@ -1090,9 +1621,12 @@ func (vm *VirtualMachine) RemoveAllSnapshotsTask(req *types.RemoveAllSnapshots_T
|
|||
|
||||
refs := allSnapshotsInTree(vm.Snapshot.RootSnapshotList)
|
||||
|
||||
vm.Snapshot = nil
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "snapshot", Val: nil},
|
||||
})
|
||||
|
||||
for _, ref := range refs {
|
||||
Map.Get(ref).(*VirtualMachineSnapshot).removeSnapshotFiles(ctx)
|
||||
Map.Remove(ref)
|
||||
}
|
||||
|
||||
|
@ -1126,6 +1660,7 @@ func (vm *VirtualMachine) ShutdownGuest(ctx *Context, c *types.ShutdownGuest) so
|
|||
&types.VmGuestShutdownEvent{VmEvent: event},
|
||||
&types.VmPoweredOffEvent{VmEvent: event},
|
||||
)
|
||||
vm.run.stop(vm)
|
||||
|
||||
Map.Update(vm, []types.PropertyChange{
|
||||
{Name: "runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff},
|
||||
|
|
|
@ -4,9 +4,12 @@ go_library(
|
|||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"performance_manager.go",
|
||||
"performance_manager_data.go",
|
||||
"root_folder.go",
|
||||
"service_content.go",
|
||||
"setting.go",
|
||||
"task_manager.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/github.com/vmware/govmomi/simulator/vpx",
|
||||
importpath = "github.com/vmware/govmomi/simulator/vpx",
|
||||
|
|
21801
vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager.go
generated
vendored
Normal file
21801
vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1877
vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager_data.go
generated
vendored
Normal file
1877
vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager_data.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
404
vendor/github.com/vmware/govmomi/simulator/vstorage_object_manager.go
generated
vendored
Normal file
404
vendor/github.com/vmware/govmomi/simulator/vstorage_object_manager.go
generated
vendored
Normal file
|
@ -0,0 +1,404 @@
|
|||
/*
|
||||
Copyright (c) 2018 VMware, Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package simulator
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/methods"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
type VStorageObject struct {
|
||||
types.VStorageObject
|
||||
types.VStorageObjectSnapshotInfo
|
||||
}
|
||||
|
||||
// VcenterVStorageObjectManager simulates the vCenter VStorageObjectManager
// managed object, tracking First Class Disks per datastore.
type VcenterVStorageObjectManager struct {
	mo.VcenterVStorageObjectManager

	// objects indexes disks by datastore reference, then by disk ID.
	objects map[types.ManagedObjectReference]map[types.ID]*VStorageObject
}
|
||||
|
||||
func NewVcenterVStorageObjectManager(ref types.ManagedObjectReference) object.Reference {
|
||||
m := &VcenterVStorageObjectManager{}
|
||||
m.Self = ref
|
||||
m.objects = make(map[types.ManagedObjectReference]map[types.ID]*VStorageObject)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) object(ds types.ManagedObjectReference, id types.ID) *VStorageObject {
|
||||
if objects, ok := m.objects[ds]; ok {
|
||||
return objects[id]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) ListVStorageObject(req *types.ListVStorageObject) soap.HasFault {
|
||||
body := &methods.ListVStorageObjectBody{
|
||||
Res: &types.ListVStorageObjectResponse{},
|
||||
}
|
||||
|
||||
if objects, ok := m.objects[req.Datastore]; ok {
|
||||
for id := range objects {
|
||||
body.Res.Returnval = append(body.Res.Returnval, id)
|
||||
}
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) RetrieveVStorageObject(req *types.RetrieveVStorageObject) soap.HasFault {
|
||||
body := new(methods.RetrieveVStorageObjectBody)
|
||||
|
||||
obj := m.object(req.Datastore, req.Id)
|
||||
if obj == nil {
|
||||
body.Fault_ = Fault("", new(types.InvalidArgument))
|
||||
} else {
|
||||
body.Res = &types.RetrieveVStorageObjectResponse{
|
||||
Returnval: obj.VStorageObject,
|
||||
}
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) RegisterDisk(ctx *Context, req *types.RegisterDisk) soap.HasFault {
|
||||
body := new(methods.RegisterDiskBody)
|
||||
|
||||
invalid := func() soap.HasFault {
|
||||
body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "path"})
|
||||
return body
|
||||
}
|
||||
|
||||
u, err := url.Parse(req.Path)
|
||||
if err != nil {
|
||||
return invalid()
|
||||
}
|
||||
u.Path = strings.TrimPrefix(u.Path, folderPrefix)
|
||||
|
||||
ds, err := ctx.svc.findDatastore(u.Query())
|
||||
if err != nil {
|
||||
return invalid()
|
||||
}
|
||||
|
||||
st, err := os.Stat(filepath.Join(ds.Info.GetDatastoreInfo().Url, u.Path))
|
||||
if err != nil {
|
||||
return invalid()
|
||||
|
||||
}
|
||||
if st.IsDir() {
|
||||
return invalid()
|
||||
}
|
||||
|
||||
path := (&object.DatastorePath{Datastore: ds.Name, Path: u.Path}).String()
|
||||
|
||||
for _, obj := range m.objects[ds.Self] {
|
||||
backing := obj.Config.BaseConfigInfo.Backing.(*types.BaseConfigInfoDiskFileBackingInfo)
|
||||
if backing.FilePath == path {
|
||||
return invalid()
|
||||
}
|
||||
}
|
||||
|
||||
creq := &types.CreateDisk_Task{
|
||||
Spec: types.VslmCreateSpec{
|
||||
Name: req.Name,
|
||||
BackingSpec: &types.VslmCreateSpecDiskFileBackingSpec{
|
||||
VslmCreateSpecBackingSpec: types.VslmCreateSpecBackingSpec{
|
||||
Datastore: ds.Self,
|
||||
Path: u.Path,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
obj, fault := m.createObject(creq, true)
|
||||
if fault != nil {
|
||||
body.Fault_ = Fault("", fault)
|
||||
return body
|
||||
}
|
||||
|
||||
body.Res = &types.RegisterDiskResponse{
|
||||
Returnval: *obj,
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) createObject(req *types.CreateDisk_Task, register bool) (*types.VStorageObject, types.BaseMethodFault) {
|
||||
dir := "fcd"
|
||||
ref := req.Spec.BackingSpec.GetVslmCreateSpecBackingSpec().Datastore
|
||||
ds := Map.Get(ref).(*Datastore)
|
||||
dc := Map.getEntityDatacenter(ds)
|
||||
dm := Map.VirtualDiskManager()
|
||||
|
||||
objects, ok := m.objects[ds.Self]
|
||||
if !ok {
|
||||
objects = make(map[types.ID]*VStorageObject)
|
||||
m.objects[ds.Self] = objects
|
||||
_ = os.Mkdir(filepath.Join(ds.Info.GetDatastoreInfo().Url, dir), 0755)
|
||||
}
|
||||
|
||||
id := uuid.New().String()
|
||||
obj := types.VStorageObject{
|
||||
Config: types.VStorageObjectConfigInfo{
|
||||
BaseConfigInfo: types.BaseConfigInfo{
|
||||
Id: types.ID{
|
||||
Id: id,
|
||||
},
|
||||
Name: req.Spec.Name,
|
||||
CreateTime: time.Now(),
|
||||
KeepAfterDeleteVm: req.Spec.KeepAfterDeleteVm,
|
||||
RelocationDisabled: types.NewBool(false),
|
||||
NativeSnapshotSupported: types.NewBool(false),
|
||||
ChangedBlockTrackingEnabled: types.NewBool(false),
|
||||
Iofilter: nil,
|
||||
},
|
||||
CapacityInMB: req.Spec.CapacityInMB,
|
||||
ConsumptionType: []string{"disk"},
|
||||
ConsumerId: nil,
|
||||
},
|
||||
}
|
||||
|
||||
backing := req.Spec.BackingSpec.(*types.VslmCreateSpecDiskFileBackingSpec)
|
||||
path := object.DatastorePath{
|
||||
Datastore: ds.Name,
|
||||
Path: backing.Path,
|
||||
}
|
||||
if path.Path == "" {
|
||||
path.Path = dir + "/" + id + ".vmdk"
|
||||
}
|
||||
|
||||
if register == false {
|
||||
err := dm.createVirtualDisk(types.VirtualDeviceConfigSpecFileOperationCreate, &types.CreateVirtualDisk_Task{
|
||||
Datacenter: &dc.Self,
|
||||
Name: path.String(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
obj.Config.BaseConfigInfo.Backing = &types.BaseConfigInfoDiskFileBackingInfo{
|
||||
BaseConfigInfoFileBackingInfo: types.BaseConfigInfoFileBackingInfo{
|
||||
BaseConfigInfoBackingInfo: types.BaseConfigInfoBackingInfo{
|
||||
Datastore: ds.Self,
|
||||
},
|
||||
FilePath: path.String(),
|
||||
BackingObjectId: uuid.New().String(),
|
||||
Parent: nil,
|
||||
DeltaSizeInMB: 0,
|
||||
},
|
||||
ProvisioningType: backing.ProvisioningType,
|
||||
}
|
||||
|
||||
objects[obj.Config.Id] = &VStorageObject{VStorageObject: obj}
|
||||
|
||||
return &obj, nil
|
||||
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) CreateDiskTask(req *types.CreateDisk_Task) soap.HasFault {
|
||||
task := CreateTask(m, "createDisk", func(*Task) (types.AnyType, types.BaseMethodFault) {
|
||||
return m.createObject(req, false)
|
||||
})
|
||||
|
||||
return &methods.CreateDisk_TaskBody{
|
||||
Res: &types.CreateDisk_TaskResponse{
|
||||
Returnval: task.Run(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) DeleteVStorageObjectTask(req *types.DeleteVStorageObject_Task) soap.HasFault {
|
||||
task := CreateTask(m, "deleteDisk", func(*Task) (types.AnyType, types.BaseMethodFault) {
|
||||
obj := m.object(req.Datastore, req.Id)
|
||||
if obj == nil {
|
||||
return nil, &types.InvalidArgument{}
|
||||
}
|
||||
|
||||
backing := obj.Config.Backing.(*types.BaseConfigInfoDiskFileBackingInfo)
|
||||
ds := Map.Get(req.Datastore).(*Datastore)
|
||||
dc := Map.getEntityDatacenter(ds)
|
||||
dm := Map.VirtualDiskManager()
|
||||
dm.DeleteVirtualDiskTask(&types.DeleteVirtualDisk_Task{
|
||||
Name: backing.FilePath,
|
||||
Datacenter: &dc.Self,
|
||||
})
|
||||
|
||||
delete(m.objects[req.Datastore], req.Id)
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
return &methods.DeleteVStorageObject_TaskBody{
|
||||
Res: &types.DeleteVStorageObject_TaskResponse{
|
||||
Returnval: task.Run(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) RetrieveSnapshotInfo(req *types.RetrieveSnapshotInfo) soap.HasFault {
|
||||
body := new(methods.RetrieveSnapshotInfoBody)
|
||||
|
||||
obj := m.object(req.Datastore, req.Id)
|
||||
if obj == nil {
|
||||
body.Fault_ = Fault("", new(types.InvalidArgument))
|
||||
} else {
|
||||
body.Res = &types.RetrieveSnapshotInfoResponse{
|
||||
Returnval: obj.VStorageObjectSnapshotInfo,
|
||||
}
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) VStorageObjectCreateSnapshotTask(req *types.VStorageObjectCreateSnapshot_Task) soap.HasFault {
|
||||
task := CreateTask(m, "createSnapshot", func(*Task) (types.AnyType, types.BaseMethodFault) {
|
||||
obj := m.object(req.Datastore, req.Id)
|
||||
if obj == nil {
|
||||
return nil, new(types.InvalidArgument)
|
||||
}
|
||||
|
||||
snapshot := types.VStorageObjectSnapshotInfoVStorageObjectSnapshot{
|
||||
Id: &types.ID{
|
||||
Id: uuid.New().String(),
|
||||
},
|
||||
BackingObjectId: uuid.New().String(),
|
||||
CreateTime: time.Now(),
|
||||
Description: req.Description,
|
||||
}
|
||||
obj.Snapshots = append(obj.Snapshots, snapshot)
|
||||
|
||||
return snapshot.Id, nil
|
||||
})
|
||||
|
||||
return &methods.VStorageObjectCreateSnapshot_TaskBody{
|
||||
Res: &types.VStorageObjectCreateSnapshot_TaskResponse{
|
||||
Returnval: task.Run(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) DeleteSnapshotTask(req *types.DeleteSnapshot_Task) soap.HasFault {
|
||||
task := CreateTask(m, "deleteSnapshot", func(*Task) (types.AnyType, types.BaseMethodFault) {
|
||||
obj := m.object(req.Datastore, req.Id)
|
||||
if obj != nil {
|
||||
for i := range obj.Snapshots {
|
||||
if *obj.Snapshots[i].Id == req.SnapshotId {
|
||||
obj.Snapshots = append(obj.Snapshots[:i], obj.Snapshots[i+1:]...)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, new(types.InvalidArgument)
|
||||
})
|
||||
|
||||
return &methods.DeleteSnapshot_TaskBody{
|
||||
Res: &types.DeleteSnapshot_TaskResponse{
|
||||
Returnval: task.Run(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) tagID(id types.ID) types.ManagedObjectReference {
|
||||
return types.ManagedObjectReference{
|
||||
Type: "fcd",
|
||||
Value: id.Id,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) AttachTagToVStorageObject(ctx *Context, req *types.AttachTagToVStorageObject) soap.HasFault {
|
||||
body := new(methods.AttachTagToVStorageObjectBody)
|
||||
ref := m.tagID(req.Id)
|
||||
|
||||
err := ctx.Map.tagManager.AttachTag(ref, types.VslmTagEntry{
|
||||
ParentCategoryName: req.Category,
|
||||
TagName: req.Tag,
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
body.Res = new(types.AttachTagToVStorageObjectResponse)
|
||||
} else {
|
||||
body.Fault_ = Fault("", err)
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) DetachTagFromVStorageObject(ctx *Context, req *types.DetachTagFromVStorageObject) soap.HasFault {
|
||||
body := new(methods.DetachTagFromVStorageObjectBody)
|
||||
ref := m.tagID(req.Id)
|
||||
|
||||
err := ctx.Map.tagManager.DetachTag(ref, types.VslmTagEntry{
|
||||
ParentCategoryName: req.Category,
|
||||
TagName: req.Tag,
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
body.Res = new(types.DetachTagFromVStorageObjectResponse)
|
||||
} else {
|
||||
body.Fault_ = Fault("", err)
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) ListVStorageObjectsAttachedToTag(ctx *Context, req *types.ListVStorageObjectsAttachedToTag) soap.HasFault {
|
||||
body := new(methods.ListVStorageObjectsAttachedToTagBody)
|
||||
|
||||
refs, err := ctx.Map.tagManager.AttachedObjects(types.VslmTagEntry{
|
||||
ParentCategoryName: req.Category,
|
||||
TagName: req.Tag,
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
body.Res = new(types.ListVStorageObjectsAttachedToTagResponse)
|
||||
for _, ref := range refs {
|
||||
body.Res.Returnval = append(body.Res.Returnval, types.ID{Id: ref.Value})
|
||||
}
|
||||
} else {
|
||||
body.Fault_ = Fault("", err)
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (m *VcenterVStorageObjectManager) ListTagsAttachedToVStorageObject(ctx *Context, req *types.ListTagsAttachedToVStorageObject) soap.HasFault {
|
||||
body := new(methods.ListTagsAttachedToVStorageObjectBody)
|
||||
ref := m.tagID(req.Id)
|
||||
|
||||
tags, err := ctx.Map.tagManager.AttachedTags(ref)
|
||||
|
||||
if err == nil {
|
||||
body.Res = &types.ListTagsAttachedToVStorageObjectResponse{
|
||||
Returnval: tags,
|
||||
}
|
||||
} else {
|
||||
body.Fault_ = Fault("", err)
|
||||
}
|
||||
|
||||
return body
|
||||
}
|
|
@ -70,7 +70,10 @@ type TokenRequest struct {
|
|||
Lifetime time.Duration // Lifetime is the token's lifetime, defaults to 10m
|
||||
Renewable bool // Renewable allows the issued token to be renewed
|
||||
Delegatable bool // Delegatable allows the issued token to be delegated (e.g. for use with ActAs)
|
||||
Token string // Token for Renew request or Issue request ActAs identity
|
||||
ActAs bool // ActAs allows to request an ActAs token based on the passed Token.
|
||||
Token string // Token for Renew request or Issue request ActAs identity or to be exchanged.
|
||||
KeyType string // KeyType for requested token (if not set will be decucted from Userinfo and Certificate options)
|
||||
KeyID string // KeyID used for signing the requests
|
||||
}
|
||||
|
||||
func (c *Client) newRequest(req TokenRequest, kind string, s *Signer) (internal.RequestSecurityToken, error) {
|
||||
|
@ -96,16 +99,27 @@ func (c *Client) newRequest(req TokenRequest, kind string, s *Signer) (internal.
|
|||
OK: false,
|
||||
},
|
||||
Delegatable: req.Delegatable,
|
||||
KeyType: req.KeyType,
|
||||
}
|
||||
|
||||
if req.Certificate == nil {
|
||||
if req.Userinfo == nil {
|
||||
return rst, errors.New("one of TokenRequest Certificate or Userinfo is required")
|
||||
if req.KeyType == "" {
|
||||
// Deduce KeyType based on Certificate nad Userinfo.
|
||||
if req.Certificate == nil {
|
||||
if req.Userinfo == nil {
|
||||
return rst, errors.New("one of TokenRequest Certificate or Userinfo is required")
|
||||
}
|
||||
rst.KeyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer"
|
||||
} else {
|
||||
rst.KeyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/PublicKey"
|
||||
// For HOK KeyID is required.
|
||||
if req.KeyID == "" {
|
||||
req.KeyID = newID()
|
||||
}
|
||||
}
|
||||
rst.KeyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer"
|
||||
} else {
|
||||
rst.KeyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/PublicKey"
|
||||
rst.UseKey = &internal.UseKey{Sig: newID()}
|
||||
}
|
||||
|
||||
if req.KeyID != "" {
|
||||
rst.UseKey = &internal.UseKey{Sig: req.KeyID}
|
||||
s.keyID = rst.UseKey.Sig
|
||||
}
|
||||
|
||||
|
@ -131,6 +145,8 @@ func (s *Signer) setLifetime(lifetime *internal.Lifetime) error {
|
|||
func (c *Client) Issue(ctx context.Context, req TokenRequest) (*Signer, error) {
|
||||
s := &Signer{
|
||||
Certificate: req.Certificate,
|
||||
keyID: req.KeyID,
|
||||
Token: req.Token,
|
||||
user: req.Userinfo,
|
||||
}
|
||||
|
||||
|
@ -139,7 +155,7 @@ func (c *Client) Issue(ctx context.Context, req TokenRequest) (*Signer, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if req.Token != "" {
|
||||
if req.ActAs {
|
||||
rst.ActAs = &internal.Target{
|
||||
Token: req.Token,
|
||||
}
|
||||
|
|
|
@ -17,6 +17,8 @@ limitations under the License.
|
|||
package sts
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
|
@ -25,7 +27,12 @@ import (
|
|||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
mrand "math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
@ -218,3 +225,101 @@ func (s *Signer) Sign(env soap.Envelope) ([]byte, error) {
|
|||
Body: body,
|
||||
})
|
||||
}
|
||||
|
||||
// SignRequest is a rest.Signer implementation which can be used to sign rest.Client.LoginByTokenBody requests.
// It builds a "SIGN" Authorization header from the gzip+base64 encoded SAML
// token and, when a certificate is present (holder-of-key token), an
// RSA-SHA256 signature over nonce, method, path, host, port and body hash.
func (s *Signer) SignRequest(req *http.Request) error {
	type param struct {
		key, val string
	}
	var params []string
	// add appends one key="value" pair to the header parameter list.
	add := func(p param) {
		params = append(params, fmt.Sprintf(`%s="%s"`, p.key, p.val))
	}

	// The SAML token is gzip-compressed, then base64-encoded.
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	if _, err := io.WriteString(gz, s.Token); err != nil {
		return fmt.Errorf("zip token: %s", err)
	}
	if err := gz.Close(); err != nil {
		return fmt.Errorf("zip token: %s", err)
	}
	add(param{
		key: "token",
		val: base64.StdEncoding.EncodeToString(buf.Bytes()),
	})

	// Holder-of-key tokens additionally carry a signature proving
	// possession of the certificate's private key.
	if s.Certificate != nil {
		// nonce: milliseconds since epoch plus a random component.
		nonce := fmt.Sprintf("%d:%d", time.Now().UnixNano()/1e6, mrand.Int())
		var body []byte
		if req.GetBody != nil {
			r, rerr := req.GetBody()
			if rerr != nil {
				return fmt.Errorf("sts: getting http.Request body: %s", rerr)
			}
			defer r.Close()
			body, rerr = ioutil.ReadAll(r)
			if rerr != nil {
				return fmt.Errorf("sts: reading http.Request body: %s", rerr)
			}
		}
		// NOTE(review): sha256.New().Sum(body) appends the SHA-256 of an
		// empty input to body rather than hashing body itself
		// (sha256.Sum256(body) would be the digest of body). Presumably
		// this matches what the server verifies — confirm upstream.
		bhash := sha256.New().Sum(body)

		// Port in the signature must be that of the reverse proxy port, vCenter's default is port 80
		port := "80" // TODO: get from lookup service
		// The signed message is each field followed by '\n', then the
		// body hash and a final '\n'.
		var buf bytes.Buffer
		msg := []string{
			nonce,
			req.Method,
			req.URL.Path,
			strings.ToLower(req.URL.Hostname()),
			port,
		}
		for i := range msg {
			buf.WriteString(msg[i])
			buf.WriteByte('\n')
		}
		buf.Write(bhash)
		buf.WriteByte('\n')

		sum := sha256.Sum256(buf.Bytes())
		key, ok := s.Certificate.PrivateKey.(*rsa.PrivateKey)
		if !ok {
			return errors.New("sts: rsa.PrivateKey is required to sign http.Request")
		}
		sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, sum[:])
		if err != nil {
			return err
		}

		add(param{
			key: "signature_alg",
			val: "RSA-SHA256",
		})
		add(param{
			key: "signature",
			val: base64.StdEncoding.EncodeToString(sig),
		})
		add(param{
			key: "nonce",
			val: nonce,
		})
		add(param{
			key: "bodyhash",
			val: base64.StdEncoding.EncodeToString(bhash),
		})
	}

	req.Header.Set("Authorization", fmt.Sprintf("SIGN %s", strings.Join(params, ", ")))

	return nil
}
|
||||
|
||||
func (s *Signer) NewRequest() TokenRequest {
|
||||
return TokenRequest{
|
||||
Token: s.Token,
|
||||
Certificate: s.Certificate,
|
||||
Userinfo: s.user,
|
||||
KeyID: s.keyID,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -51,15 +51,13 @@ func (o AssociatedObject) Reference() types.ManagedObjectReference {
|
|||
|
||||
// Association for tag-association requests.
|
||||
type Association struct {
|
||||
TagID string `json:"tag_id,omitempty"`
|
||||
ObjectID *AssociatedObject `json:"object_id,omitempty"`
|
||||
}
|
||||
|
||||
// NewAssociation returns an Association, converting ref to an AssociatedObject.
|
||||
func NewAssociation(tagID string, ref mo.Reference) Association {
|
||||
func NewAssociation(ref mo.Reference) Association {
|
||||
obj := AssociatedObject(ref.Reference())
|
||||
return Association{
|
||||
TagID: tagID,
|
||||
ObjectID: &obj,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -43,6 +43,16 @@ func NewClient(c *vim25.Client) *Client {
|
|||
return &Client{sc}
|
||||
}
|
||||
|
||||
type Signer interface {
|
||||
SignRequest(*http.Request) error
|
||||
}
|
||||
|
||||
type signerContext struct{}
|
||||
|
||||
func (c *Client) WithSigner(ctx context.Context, s Signer) context.Context {
|
||||
return context.WithValue(ctx, signerContext{}, s)
|
||||
}
|
||||
|
||||
// Do sends the http.Request, decoding resBody if provided.
|
||||
func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) error {
|
||||
switch req.Method {
|
||||
|
@ -52,6 +62,12 @@ func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{})
|
|||
|
||||
req.Header.Set("Accept", "application/json")
|
||||
|
||||
if s, ok := ctx.Value(signerContext{}).(Signer); ok {
|
||||
if err := s.SignRequest(req); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return c.Client.Do(ctx, req, func(res *http.Response) error {
|
||||
switch res.StatusCode {
|
||||
case http.StatusOK:
|
||||
|
@ -98,6 +114,10 @@ func (c *Client) Login(ctx context.Context, user *url.Userinfo) error {
|
|||
return c.Do(ctx, req, nil)
|
||||
}
|
||||
|
||||
func (c *Client) LoginByToken(ctx context.Context) error {
|
||||
return c.Login(ctx, nil)
|
||||
}
|
||||
|
||||
// Logout deletes the current session.
|
||||
func (c *Client) Logout(ctx context.Context) error {
|
||||
req := internal.URL(c, internal.SessionPath).Request(http.MethodDelete)
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/vmware/govmomi/vapi/internal"
|
||||
|
@ -33,12 +34,19 @@ import (
|
|||
vim "github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
type session struct {
|
||||
User string `json:"user"`
|
||||
Created time.Time `json:"created_time"`
|
||||
LastAccessed time.Time `json:"last_accessed_time"`
|
||||
}
|
||||
|
||||
type handler struct {
|
||||
*http.ServeMux
|
||||
sync.Mutex
|
||||
Category map[string]*tags.Category
|
||||
Tag map[string]*tags.Tag
|
||||
Association map[string]map[internal.AssociatedObject]bool
|
||||
Session map[string]*session
|
||||
}
|
||||
|
||||
// New creates a vAPI simulator.
|
||||
|
@ -48,6 +56,7 @@ func New(u *url.URL, settings []vim.BaseOptionValue) (string, http.Handler) {
|
|||
Category: make(map[string]*tags.Category),
|
||||
Tag: make(map[string]*tags.Tag),
|
||||
Association: make(map[string]map[internal.AssociatedObject]bool),
|
||||
Session: make(map[string]*session),
|
||||
}
|
||||
|
||||
handlers := []struct {
|
||||
|
@ -60,6 +69,7 @@ func New(u *url.URL, settings []vim.BaseOptionValue) (string, http.Handler) {
|
|||
{internal.TagPath, s.tag},
|
||||
{internal.TagPath + "/", s.tagID},
|
||||
{internal.AssociationPath, s.association},
|
||||
{internal.AssociationPath + "/", s.associationID},
|
||||
}
|
||||
|
||||
for i := range handlers {
|
||||
|
@ -68,6 +78,11 @@ func New(u *url.URL, settings []vim.BaseOptionValue) (string, http.Handler) {
|
|||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if !s.isAuthorized(r) {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
h.m(w, r)
|
||||
})
|
||||
}
|
||||
|
@ -75,6 +90,99 @@ func New(u *url.URL, settings []vim.BaseOptionValue) (string, http.Handler) {
|
|||
return internal.Path + "/", s
|
||||
}
|
||||
|
||||
func (s *handler) isAuthorized(r *http.Request) bool {
|
||||
if r.Method == http.MethodPost && strings.HasSuffix(r.URL.Path, internal.SessionPath) {
|
||||
return true
|
||||
}
|
||||
id := r.Header.Get(internal.SessionCookieName)
|
||||
if id == "" {
|
||||
if cookie, err := r.Cookie(internal.SessionCookieName); err == nil {
|
||||
id = cookie.Value
|
||||
r.Header.Set(internal.SessionCookieName, id)
|
||||
}
|
||||
}
|
||||
info, ok := s.Session[id]
|
||||
if ok {
|
||||
info.LastAccessed = time.Now()
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
func (s *handler) hasAuthorization(r *http.Request) (string, bool) {
|
||||
u, p, ok := r.BasicAuth()
|
||||
if ok { // user+pass auth
|
||||
if u == "" || p == "" {
|
||||
return u, false
|
||||
}
|
||||
return u, true
|
||||
}
|
||||
auth := r.Header.Get("Authorization")
|
||||
return "TODO", strings.HasPrefix(auth, "SIGN ") // token auth
|
||||
}
|
||||
|
||||
func (s *handler) findTag(e vim.VslmTagEntry) *tags.Tag {
|
||||
for _, c := range s.Category {
|
||||
if c.Name == e.ParentCategoryName {
|
||||
for _, t := range s.Tag {
|
||||
if t.Name == e.TagName && t.CategoryID == c.ID {
|
||||
return t
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttachedObjects is meant for internal use via simulator.Registry.tagManager
|
||||
func (s *handler) AttachedObjects(tag vim.VslmTagEntry) ([]vim.ManagedObjectReference, vim.BaseMethodFault) {
|
||||
t := s.findTag(tag)
|
||||
if t == nil {
|
||||
return nil, new(vim.NotFound)
|
||||
}
|
||||
var ids []vim.ManagedObjectReference
|
||||
for id := range s.Association[t.ID] {
|
||||
ids = append(ids, vim.ManagedObjectReference(id))
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// AttachedTags is meant for internal use via simulator.Registry.tagManager
|
||||
func (s *handler) AttachedTags(ref vim.ManagedObjectReference) ([]vim.VslmTagEntry, vim.BaseMethodFault) {
|
||||
oid := internal.AssociatedObject(ref)
|
||||
var tags []vim.VslmTagEntry
|
||||
for id, objs := range s.Association {
|
||||
if objs[oid] {
|
||||
tag := s.Tag[id]
|
||||
cat := s.Category[tag.CategoryID]
|
||||
tags = append(tags, vim.VslmTagEntry{
|
||||
TagName: tag.Name,
|
||||
ParentCategoryName: cat.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
// AttachTag is meant for internal use via simulator.Registry.tagManager
|
||||
func (s *handler) AttachTag(ref vim.ManagedObjectReference, tag vim.VslmTagEntry) vim.BaseMethodFault {
|
||||
t := s.findTag(tag)
|
||||
if t == nil {
|
||||
return new(vim.NotFound)
|
||||
}
|
||||
s.Association[t.ID][internal.AssociatedObject(ref)] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetachTag is meant for internal use via simulator.Registry.tagManager
|
||||
func (s *handler) DetachTag(id vim.ManagedObjectReference, tag vim.VslmTagEntry) vim.BaseMethodFault {
|
||||
t := s.findTag(tag)
|
||||
if t == nil {
|
||||
return new(vim.NotFound)
|
||||
}
|
||||
delete(s.Association[t.ID], internal.AssociatedObject(id))
|
||||
return nil
|
||||
}
|
||||
|
||||
// ok responds with http.StatusOK and json encodes val if given.
|
||||
func (s *handler) ok(w http.ResponseWriter, val ...interface{}) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
@ -136,23 +244,28 @@ func (s *handler) decode(r *http.Request, w http.ResponseWriter, val interface{}
|
|||
}
|
||||
|
||||
func (s *handler) session(w http.ResponseWriter, r *http.Request) {
|
||||
var id string
|
||||
id := r.Header.Get(internal.SessionCookieName)
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodPost:
|
||||
user, ok := s.hasAuthorization(r)
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
id = uuid.New().String()
|
||||
// TODO: save session
|
||||
now := time.Now()
|
||||
s.Session[id] = &session{user, now, now}
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: internal.SessionCookieName,
|
||||
Value: id,
|
||||
})
|
||||
s.ok(w)
|
||||
s.ok(w, id)
|
||||
case http.MethodDelete:
|
||||
// TODO: delete session
|
||||
delete(s.Session, id)
|
||||
s.ok(w)
|
||||
case http.MethodGet:
|
||||
// TODO: test is session is valid
|
||||
s.ok(w, id)
|
||||
s.ok(w, s.Session[id])
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -161,8 +274,12 @@ func (s *handler) action(r *http.Request) string {
|
|||
}
|
||||
|
||||
func (s *handler) id(r *http.Request) string {
|
||||
id := path.Base(r.URL.Path)
|
||||
return strings.TrimPrefix(id, "id:")
|
||||
base := path.Base(r.URL.Path)
|
||||
id := strings.TrimPrefix(base, "id:")
|
||||
if id == base {
|
||||
return "" // trigger 404 Not Found w/o id: prefix
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
func newID(kind string) string {
|
||||
|
@ -321,21 +438,7 @@ func (s *handler) association(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
if spec.TagID != "" {
|
||||
if _, exists := s.Association[spec.TagID]; !exists {
|
||||
log.Printf("association tag not found: %s", spec.TagID)
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch s.action(r) {
|
||||
case "attach":
|
||||
s.Association[spec.TagID][*spec.ObjectID] = true
|
||||
s.ok(w)
|
||||
case "detach":
|
||||
delete(s.Association[spec.TagID], *spec.ObjectID)
|
||||
s.ok(w)
|
||||
case "list-attached-tags":
|
||||
var ids []string
|
||||
for id, objs := range s.Association {
|
||||
|
@ -344,9 +447,37 @@ func (s *handler) association(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
s.ok(w, ids)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *handler) associationID(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
id := s.id(r)
|
||||
if _, exists := s.Association[id]; !exists {
|
||||
log.Printf("association tag not found: %s", id)
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
var spec internal.Association
|
||||
if !s.decode(r, w, &spec) {
|
||||
return
|
||||
}
|
||||
|
||||
switch s.action(r) {
|
||||
case "attach":
|
||||
s.Association[id][*spec.ObjectID] = true
|
||||
s.ok(w)
|
||||
case "detach":
|
||||
delete(s.Association[id], *spec.ObjectID)
|
||||
s.ok(w)
|
||||
case "list-attached-objects":
|
||||
var ids []internal.AssociatedObject
|
||||
for id := range s.Association[spec.TagID] {
|
||||
for id := range s.Association[id] {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
s.ok(w, ids)
|
||||
|
|
|
@ -42,8 +42,8 @@ func (c *Manager) AttachTag(ctx context.Context, tagID string, ref mo.Reference)
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
spec := internal.NewAssociation(id, ref)
|
||||
url := internal.URL(c, internal.AssociationPath).WithAction("attach")
|
||||
spec := internal.NewAssociation(ref)
|
||||
url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("attach")
|
||||
return c.Do(ctx, url.Request(http.MethodPost, spec), nil)
|
||||
}
|
||||
|
||||
|
@ -54,14 +54,14 @@ func (c *Manager) DetachTag(ctx context.Context, tagID string, ref mo.Reference)
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
spec := internal.NewAssociation(id, ref)
|
||||
url := internal.URL(c, internal.AssociationPath).WithAction("detach")
|
||||
spec := internal.NewAssociation(ref)
|
||||
url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("detach")
|
||||
return c.Do(ctx, url.Request(http.MethodPost, spec), nil)
|
||||
}
|
||||
|
||||
// ListAttachedTags fetches the array of tag IDs attached to the given object.
|
||||
func (c *Manager) ListAttachedTags(ctx context.Context, ref mo.Reference) ([]string, error) {
|
||||
spec := internal.NewAssociation("", ref)
|
||||
spec := internal.NewAssociation(ref)
|
||||
url := internal.URL(c, internal.AssociationPath).WithAction("list-attached-tags")
|
||||
var res []string
|
||||
return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res)
|
||||
|
@ -91,12 +91,9 @@ func (c *Manager) ListAttachedObjects(ctx context.Context, tagID string) ([]mo.R
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
spec := internal.Association{
|
||||
TagID: id,
|
||||
}
|
||||
url := internal.URL(c, internal.AssociationPath).WithAction("list-attached-objects")
|
||||
url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("list-attached-objects")
|
||||
var res []internal.AssociatedObject
|
||||
if err := c.Do(ctx, url.Request(http.MethodPost, spec), &res); err != nil {
|
||||
if err := c.Do(ctx, url.Request(http.MethodPost, nil), &res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
@ -137,6 +137,30 @@ func (c *Manager) GetTag(ctx context.Context, id string) (*Tag, error) {
|
|||
|
||||
}
|
||||
|
||||
// GetTagForCategory fetches the tag information for the given identifier in the given category.
|
||||
func (c *Manager) GetTagForCategory(ctx context.Context, id, category string) (*Tag, error) {
|
||||
if category == "" {
|
||||
return c.GetTag(ctx, id)
|
||||
}
|
||||
|
||||
ids, err := c.ListTagsForCategory(ctx, category)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
tag, err := c.GetTag(ctx, id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get tag for category %s %s: %s", category, id, err)
|
||||
}
|
||||
if tag.ID == id || tag.Name == id {
|
||||
return tag, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("tag %q not found in category %q", id, category)
|
||||
}
|
||||
|
||||
// ListTags returns all tag IDs in the system.
|
||||
func (c *Manager) ListTags(ctx context.Context) ([]string, error) {
|
||||
url := internal.URL(c, internal.TagPath)
|
||||
|
|
|
@ -218,6 +218,9 @@ func assignValue(val reflect.Value, fi []int, pv reflect.Value) {
|
|||
} else {
|
||||
panic(fmt.Sprintf("type %s doesn't implement %s", pt.Name(), rt.Name()))
|
||||
}
|
||||
} else if rt.Kind() == reflect.Struct && pt.Kind() == reflect.Ptr {
|
||||
pv = pv.Elem()
|
||||
pt = pv.Type()
|
||||
}
|
||||
|
||||
if pt.AssignableTo(rt) {
|
||||
|
|
|
@ -26,11 +26,11 @@ import (
|
|||
)
|
||||
|
||||
type readerReport struct {
|
||||
t time.Time
|
||||
pos int64 // Keep first to ensure 64-bit alignment
|
||||
size int64 // Keep first to ensure 64-bit alignment
|
||||
bps *uint64 // Keep first to ensure 64-bit alignment
|
||||
|
||||
pos int64
|
||||
size int64
|
||||
bps *uint64
|
||||
t time.Time
|
||||
|
||||
err error
|
||||
}
|
||||
|
|
|
@ -656,6 +656,8 @@ func (c *Client) Upload(ctx context.Context, f io.Reader, u *url.URL, param *Upl
|
|||
return err
|
||||
}
|
||||
|
||||
defer res.Body.Close()
|
||||
|
||||
switch res.StatusCode {
|
||||
case http.StatusOK:
|
||||
case http.StatusCreated:
|
||||
|
|
|
@ -71,7 +71,7 @@ func defaultResourceAllocationInfo() ResourceAllocationInfo {
|
|||
return ResourceAllocationInfo{
|
||||
Reservation: NewInt64(0),
|
||||
ExpandableReservation: NewBool(true),
|
||||
Limit: NewInt64(-1),
|
||||
Limit: NewInt64(-1),
|
||||
Shares: &SharesInfo{
|
||||
Level: SharesLevelNormal,
|
||||
},
|
||||
|
|
|
@ -786,6 +786,7 @@ func (b *DvsFilterConfig) GetDvsFilterConfig() *DvsFilterConfig { return b }
|
|||
|
||||
type BaseDvsFilterConfig interface {
|
||||
GetDvsFilterConfig() *DvsFilterConfig
|
||||
GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -828,12 +829,21 @@ func (b *DvsNetworkRuleQualifier) GetDvsNetworkRuleQualifier() *DvsNetworkRuleQu
|
|||
|
||||
type BaseDvsNetworkRuleQualifier interface {
|
||||
GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier
|
||||
GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier
|
||||
}
|
||||
|
||||
func init() {
|
||||
t["BaseDvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem()
|
||||
}
|
||||
|
||||
func (b *DvsIpNetworkRuleQualifier) GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier {
|
||||
return b
|
||||
}
|
||||
|
||||
type BaseDvsIpNetworkRuleQualifier interface {
|
||||
GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier
|
||||
}
|
||||
|
||||
func (b *DvsTrafficFilterConfig) GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig { return b }
|
||||
|
||||
type BaseDvsTrafficFilterConfig interface {
|
||||
|
|
Loading…
Reference in New Issue