adding support for gpus in node e2e

Signed-off-by: Vishnu kannan <vishnuk@google.com>
pull/6/head
Vishnu kannan 2017-04-28 10:37:03 -07:00
parent 98b5e9d57b
commit d1b4dba440
26 changed files with 121747 additions and 7474 deletions

Godeps/Godeps.json generated

@ -1,7 +1,7 @@
{
"ImportPath": "k8s.io/kubernetes",
"GoVersion": "go1.7",
"GodepVersion": "v74",
"GodepVersion": "v79",
"Packages": [
"github.com/ugorji/go/codec/codecgen",
"github.com/onsi/ginkgo/ginkgo",
@ -823,12 +823,12 @@
},
{
"ImportPath": "github.com/docker/distribution/digest",
"Comment": "v2.4.0-rc.1-38-gcd27f17",
"Comment": "v2.4.0-rc.1-38-gcd27f179",
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
},
{
"ImportPath": "github.com/docker/distribution/reference",
"Comment": "v2.4.0-rc.1-38-gcd27f17",
"Comment": "v2.4.0-rc.1-38-gcd27f179",
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
},
{
@ -1076,127 +1076,127 @@
},
{
"ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/compare",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/description",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/equal",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/face",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/gostring",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/populate",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/size",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/stringer",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/testgen",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/union",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity/command",
"Comment": "v0.4-3-gc0656ed",
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
@ -1655,17 +1655,17 @@
},
{
"ImportPath": "github.com/heketi/heketi/client/api/go-client",
"Comment": "2017-03-08T23:29:50Z",
"Comment": "v4.0.0-22-g7a54b6f",
"Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2"
},
{
"ImportPath": "github.com/heketi/heketi/pkg/glusterfs/api",
"Comment": "2017-03-08T23:29:50Z",
"Comment": "v4.0.0-22-g7a54b6f",
"Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2"
},
{
"ImportPath": "github.com/heketi/heketi/pkg/utils",
"Comment": "2017-03-08T23:29:50Z",
"Comment": "v4.0.0-22-g7a54b6f",
"Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2"
},
{
@ -2182,107 +2182,107 @@
},
{
"ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/flavors",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/images",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/servers",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tenants",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tokens",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v3/tokens",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/utils",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/pagination",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/servers",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/testhelper",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/testhelper/client",
"Comment": "v1.0.0-1012-ge00690e",
"Comment": "v1.0.0-1012-ge00690e8",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
@ -2698,43 +2698,47 @@
},
{
"ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/compute/v0.alpha",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/compute/v0.beta",
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/container/v1",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/dns/v1",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/gensupport",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/logging/v2beta1",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/monitoring/v3",
"Rev": "64485db7e8c8be51e572801d06cdbcfadd3546c1"
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/grpc",

Godeps/LICENSES generated

@ -81823,6 +81823,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/google.golang.org/api/compute/v0.beta licensed under: =
Copyright (c) 2011 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 -
================================================================================
================================================================================
= vendor/google.golang.org/api/compute/v1 licensed under: =


@ -237,6 +237,7 @@ define TEST_E2E_NODE_HELP_INFO
# Used when RUNTIME is set to "remote".
# IMAGE_SERVICE_ENDPOINT: remote image endpoint to connect to, to prepull images.
# Used when RUNTIME is set to "remote".
# IMAGE_CONFIG_FILE: path to a file containing image configuration.
#
# Example:
# make test-e2e-node FOCUS=Kubelet SKIP=container


@ -71,7 +71,8 @@ if [ $remote = true ] ; then
exit 0
fi
gubernator=${GUBERNATOR:-"false"}
if [[ $hosts == "" && $images == "" ]]; then
image_config_file=${IMAGE_CONFIG_FILE:-""}
if [[ $hosts == "" && $images == "" && $image_config_file == "" ]]; then
image_project=${IMAGE_PROJECT:-"google-containers"}
gci_image=$(gcloud compute images list --project $image_project \
--no-standard-images --regexp="gci-dev.*" --format="table[no-heading](name)")
@ -126,6 +127,7 @@ if [ $remote = true ] ; then
echo "Hosts: $hosts"
echo "Ginkgo Flags: $ginkgoflags"
echo "Instance Metadata: $metadata"
echo "Image Config File: $image_config_file"
# Invoke the runner
go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 --ssh-env="gce" \
--zone="$zone" --project="$project" --gubernator="$gubernator" \
@ -133,6 +135,7 @@ if [ $remote = true ] ; then
--results-dir="$artifacts" --ginkgo-flags="$ginkgoflags" \
--image-project="$image_project" --instance-name-prefix="$instance_prefix" \
--delete-instances="$delete_instances" --test_args="$test_args" --instance-metadata="$metadata" \
--image-config-file="$image_config_file" \
2>&1 | tee -i "${artifacts}/build-log.txt"
exit $?


@ -0,0 +1,28 @@
# To copy an image between projects:
# `gcloud compute --project <to-project> disks create <image name> --image=https://www.googleapis.com/compute/v1/projects/<from-project>/global/images/<image-name>`
# `gcloud compute --project <to-project> images create <image-name> --source-disk=<image-name>`
images:
ubuntu-docker10:
image: e2e-node-ubuntu-trusty-docker10-v2-image # docker 1.10.3
project: kubernetes-node-e2e-images
ubuntu-docker12:
image: e2e-node-ubuntu-trusty-docker12-v2-image # docker 1.12.6
project: kubernetes-node-e2e-images
# Using `n1-standard-2` because GPU driver installation takes up a significant amount of CPU.
machine: n1-standard-2
metadata: 'startup-script<test/e2e_node/jenkins/ubuntu-14.04-nvidia-install.sh'
resources:
accelerators:
- type: nvidia-tesla-k80
count: 2
coreos-alpha:
image: coreos-alpha-1122-0-0-v20160727 # docker 1.11.2
project: coreos-cloud
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
containervm:
image: e2e-node-containervm-v20161208-image # docker 1.11.2
project: kubernetes-node-e2e-images
gci:
image_regex: gci-stable-56-9000-84-2 # docker 1.11.2
project: google-containers
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
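
The new image config above (consumed via IMAGE_CONFIG_FILE and --image-config-file earlier in this diff) declares per-image machine types, metadata, and GPU accelerators. The sketch below is illustrative only: it uses simplified types and a gopkg.in/yaml.v2 decode that are assumptions for the example, not the runner's actual parsing code (the runner's own ImageConfig/GCEImage/Resources/Accelerator types appear in the run_remote.go diff further down).

```go
// Illustrative sketch: simplified structs mirroring the image-config schema
// above, decoded with gopkg.in/yaml.v2. Names and tags here are assumptions,
// not the runner's real types.
package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

type Accelerator struct {
	Type  string `yaml:"type"`
	Count int64  `yaml:"count"`
}

type Image struct {
	Image     string `yaml:"image"`
	Project   string `yaml:"project"`
	Machine   string `yaml:"machine"`
	Metadata  string `yaml:"metadata"`
	Resources struct {
		Accelerators []Accelerator `yaml:"accelerators"`
	} `yaml:"resources"`
}

type ImageConfig struct {
	Images map[string]Image `yaml:"images"`
}

const example = `
images:
  ubuntu-docker12:
    image: e2e-node-ubuntu-trusty-docker12-v2-image
    project: kubernetes-node-e2e-images
    machine: n1-standard-2
    resources:
      accelerators:
        - type: nvidia-tesla-k80
          count: 2
`

func main() {
	var cfg ImageConfig
	if err := yaml.Unmarshal([]byte(example), &cfg); err != nil {
		log.Fatal(err)
	}
	// Prints: [{nvidia-tesla-k80 2}]
	fmt.Println(cfg.Images["ubuntu-docker12"].Resources.Accelerators)
}
```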


@ -1,6 +1,6 @@
GCE_HOSTS=
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
GCE_ZONE=us-central1-f
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config-serial.yaml
GCE_ZONE=us-west1-d
GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'


@ -0,0 +1,29 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is meant to install Nvidia drivers on Ubuntu 14.04 GCE VMs.
# This script is meant to facilitate testing of Nvidia GPU support in Kubernetes.
echo "Checking for CUDA and installing."
# Check for CUDA and try to install.
if ! dpkg-query -W cuda; then
curl -O http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_8.0.61-1_amd64.deb
dpkg -i ./cuda-repo-ubuntu1404_8.0.61-1_amd64.deb
apt-get update
apt-get install cuda -y
apt-get install linux-headers-$(uname -r) -y
fi
## Pre-loads kernel modules
nvidia-smi


@ -25,7 +25,7 @@ go_library(
"//vendor/github.com/pborman/uuid:go_default_library",
"//vendor/golang.org/x/oauth2:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
],
)


@ -41,7 +41,7 @@ import (
"github.com/pborman/uuid"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
compute "google.golang.org/api/compute/v1"
compute "google.golang.org/api/compute/v0.beta"
)
var testArgs = flag.String("test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
@ -60,7 +60,8 @@ var gubernator = flag.Bool("gubernator", false, "If true, output Gubernator link
var ginkgoFlags = flag.String("ginkgo-flags", "", "Passed to ginkgo to specify additional flags such as --skip=.")
const (
defaultMachine = "n1-standard-1"
defaultMachine = "n1-standard-1"
acceleratorTypeResourceFormat = "https://www.googleapis.com/compute/beta/projects/%s/zones/%s/acceleratorTypes/%s"
)
var (
@ -100,6 +101,15 @@ type ImageConfig struct {
Images map[string]GCEImage `json:"images"`
}
type Accelerator struct {
Type string `json:"type,omitempty"`
Count int64 `json:"count, omitempty"`
}
type Resources struct {
Accelerators []Accelerator `json:"accelerators,omitempty"`
}
type GCEImage struct {
Image string `json:"image, omitempty"`
Project string `json:"project"`
@ -109,7 +119,8 @@ type GCEImage struct {
// If the number of existing previous images is less than what is desired, the test will use what is available.
PreviousImages int `json:"previous_images, omitempty"`
Machine string `json:"machine, omitempty"`
Machine string `json:"machine, omitempty"`
Resources Resources `json:"resources, omitempty"`
// This test is for benchmark (no limit verification, more result log, node name has format 'machine-image-uuid') if 'Tests' is non-empty.
Tests []string `json:"tests, omitempty"`
}
@ -119,11 +130,12 @@ type internalImageConfig struct {
}
type internalGCEImage struct {
image string
project string
metadata *compute.Metadata
machine string
tests []string
image string
project string
resources *Resources
metadata *compute.Metadata
machine string
tests []string
}
// parseFlags parse subcommands and flags
@ -192,11 +204,12 @@ func main() {
}
for _, image := range images {
gceImage := internalGCEImage{
image: image,
project: imageConfig.Project,
metadata: getImageMetadata(imageConfig.Metadata),
machine: imageConfig.Machine,
tests: imageConfig.Tests,
image: image,
project: imageConfig.Project,
metadata: getImageMetadata(imageConfig.Metadata),
machine: imageConfig.Machine,
tests: imageConfig.Tests,
resources: &imageConfig.Resources,
}
if isRegex && len(images) > 1 {
// Use image name when shortName is not unique.
@ -347,7 +360,7 @@ func getImageMetadata(input string) *compute.Metadata {
val := v
metadataItems = append(metadataItems, &compute.MetadataItems{
Key: k,
Value: &val,
Value: val,
})
}
ret := compute.Metadata{Items: metadataItems}
@ -514,13 +527,36 @@ func createInstance(imageConfig *internalGCEImage) (string, error) {
},
},
}
i.Metadata = imageConfig.metadata
op, err := computeService.Instances.Insert(*project, *zone, i).Do()
if err != nil {
return "", err
for _, accelerator := range imageConfig.resources.Accelerators {
if i.GuestAccelerators == nil {
i.GuestAccelerators = []*compute.AcceleratorConfig{}
i.Scheduling = &compute.Scheduling{
OnHostMaintenance: "TERMINATE",
AutomaticRestart: true,
}
}
aType := fmt.Sprintf(acceleratorTypeResourceFormat, *project, *zone, accelerator.Type)
ac := &compute.AcceleratorConfig{
AcceleratorCount: accelerator.Count,
AcceleratorType: aType,
}
i.GuestAccelerators = append(i.GuestAccelerators, ac)
}
if op.Error != nil {
return "", fmt.Errorf("could not create instance %s: %+v", name, op.Error)
var err error
i.Metadata = imageConfig.metadata
if _, err := computeService.Instances.Get(*project, *zone, i.Name).Do(); err != nil {
op, err := computeService.Instances.Insert(*project, *zone, i).Do()
if err != nil {
ret := fmt.Sprintf("could not create instance %s: API error: %v", name, err)
if op != nil {
ret = fmt.Sprintf("%s: %v", ret, op.Error)
}
return "", fmt.Errorf(ret)
} else if op.Error != nil {
return "", fmt.Errorf("could not create instance %s: %+v", name, op.Error)
}
}
instanceRunning := false
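
For reference, a minimal self-contained sketch of the accelerator wiring that createInstance now performs, assuming the google.golang.org/api/compute/v0.beta client vendored by this commit. GPU instances must be scheduled to terminate rather than live-migrate on host maintenance, which is why Scheduling is set alongside GuestAccelerators. Project, zone, machine type, and accelerator values below are placeholders, not the runner's real configuration.

```go
// Minimal sketch, not the runner itself: attaching a GPU to a GCE test VM,
// mirroring the createInstance changes above. Values are placeholders.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
)

const acceleratorTypeResourceFormat = "https://www.googleapis.com/compute/beta/projects/%s/zones/%s/acceleratorTypes/%s"

func addAccelerator(i *compute.Instance, project, zone, acceleratorType string, count int64) {
	// GPU instances must terminate (not live-migrate) on host maintenance,
	// so scheduling is adjusted the first time an accelerator is added.
	if i.GuestAccelerators == nil {
		i.Scheduling = &compute.Scheduling{
			OnHostMaintenance: "TERMINATE",
			AutomaticRestart:  true,
		}
	}
	i.GuestAccelerators = append(i.GuestAccelerators, &compute.AcceleratorConfig{
		AcceleratorCount: count,
		AcceleratorType:  fmt.Sprintf(acceleratorTypeResourceFormat, project, zone, acceleratorType),
	})
}

func main() {
	i := &compute.Instance{
		Name:        "test-gpu-vm",
		MachineType: "zones/us-west1-b/machineTypes/n1-standard-2",
	}
	addAccelerator(i, "my-gce-project", "us-west1-b", "nvidia-tesla-k80", 2)
	fmt.Printf("%+v %+v\n", i.GuestAccelerators[0], i.Scheduling)
}
```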

vendor/BUILD vendored

@ -344,6 +344,7 @@ filegroup(
"//vendor/golang.org/x/time/rate:all-srcs",
"//vendor/google.golang.org/api/cloudmonitoring/v2beta2:all-srcs",
"//vendor/google.golang.org/api/compute/v0.alpha:all-srcs",
"//vendor/google.golang.org/api/compute/v0.beta:all-srcs",
"//vendor/google.golang.org/api/compute/v1:all-srcs",
"//vendor/google.golang.org/api/container/v1:all-srcs",
"//vendor/google.golang.org/api/dns/v1:all-srcs",


@ -6,3 +6,4 @@ mliyazud@redhat.com
nerawat@redhat.com
obnox@redhat.com
obnox@samba.org
lpabon@gmail.com


@ -67,10 +67,9 @@ func New(client *http.Client) (*Service, error) {
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
GoogleClientHeaderElement string // client header fragment, for Google use only
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
MetricDescriptors *MetricDescriptorsService
@ -86,10 +85,6 @@ func (s *Service) userAgent() string {
return googleapi.UserAgent + " " + s.UserAgent
}
func (s *Service) clientHeader() string {
return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}
func NewMetricDescriptorsService(s *Service) *MetricDescriptorsService {
rs := &MetricDescriptorsService{s: s}
return rs
@ -1019,7 +1014,6 @@ func (c *MetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, err
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor)
if err != nil {
@ -1154,7 +1148,6 @@ func (c *MetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, err
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors/{metric}")
@ -1332,7 +1325,6 @@ func (c *MetricDescriptorsListCall) doRequest(alt string) (*http.Response, error
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1610,7 +1602,6 @@ func (c *TimeseriesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1841,7 +1832,6 @@ func (c *TimeseriesWriteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.writetimeseriesrequest)
if err != nil {
@ -2082,7 +2072,6 @@ func (c *TimeseriesDescriptorsListCall) doRequest(alt string) (*http.Response, e
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}

File diff suppressed because it is too large

File diff suppressed because it is too large

vendor/google.golang.org/api/compute/v0.beta/BUILD generated vendored Normal file

@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["compute-gen.go"],
tags = ["automanaged"],
deps = [
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/golang.org/x/net/context/ctxhttp:go_default_library",
"//vendor/google.golang.org/api/gensupport:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -73,10 +73,9 @@ func New(client *http.Client) (*Service, error) {
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
GoogleClientHeaderElement string // client header fragment, for Google use only
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Changes *ChangesService
@ -94,10 +93,6 @@ func (s *Service) userAgent() string {
return googleapi.UserAgent + " " + s.UserAgent
}
func (s *Service) clientHeader() string {
return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}
func NewChangesService(s *Service) *ChangesService {
rs := &ChangesService{s: s}
return rs
@ -593,7 +588,6 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.change)
if err != nil {
@ -749,7 +743,6 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -941,7 +934,6 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1130,7 +1122,6 @@ func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone)
if err != nil {
@ -1265,7 +1256,6 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}")
@ -1383,7 +1373,6 @@ func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1556,7 +1545,6 @@ func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1734,7 +1722,6 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1909,7 +1896,6 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}

File diff suppressed because it is too large


@ -77,10 +77,9 @@ func New(client *http.Client) (*Service, error) {
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
GoogleClientHeaderElement string // client header fragment, for Google use only
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
BillingAccounts *BillingAccountsService
@ -100,10 +99,6 @@ func (s *Service) userAgent() string {
return googleapi.UserAgent + " " + s.UserAgent
}
func (s *Service) clientHeader() string {
return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}
func NewBillingAccountsService(s *Service) *BillingAccountsService {
rs := &BillingAccountsService{s: s}
rs.Logs = NewBillingAccountsLogsService(s)
@ -365,20 +360,21 @@ type ListLogEntriesRequest struct {
// desc". The first option returns entries in order of increasing values
// of LogEntry.timestamp (oldest first), and the second option returns
// entries in order of decreasing timestamps (newest first). Entries
// with equal timestamps are returned in order of LogEntry.insertId.
// with equal timestamps are returned in order of their insert_id
// values.
OrderBy string `json:"orderBy,omitempty"`
// PageSize: Optional. The maximum number of results to return from this
// request. Non-positive values are ignored. The presence of
// nextPageToken in the response indicates that more results might be
// next_page_token in the response indicates that more results might be
// available.
PageSize int64 `json:"pageSize,omitempty"`
// PageToken: Optional. If present, then retrieve the next batch of
// results from the preceding call to this method. pageToken must be the
// value of nextPageToken from the previous response. The values of
// other method parameters should be identical to those in the previous
// call.
// results from the preceding call to this method. page_token must be
// the value of next_page_token from the previous response. The values
// of other method parameters should be identical to those in the
// previous call.
PageToken string `json:"pageToken,omitempty"`
// ProjectIds: Deprecated. Use resource_names instead. One or more
@ -388,13 +384,15 @@ type ListLogEntriesRequest struct {
// list of resources in resource_names.
ProjectIds []string `json:"projectIds,omitempty"`
// ResourceNames: Required. Names of one or more resources from which to
// retrieve log
// ResourceNames: Required. Names of one or more parent resources from
// which to retrieve log
// entries:
// "projects/[PROJECT_ID]"
// "organizations/[ORGANIZATION_ID]"
// Pro
// jects listed in the project_ids field are added to this list.
// "bi
// llingAccounts/[BILLING_ACCOUNT_ID]"
// "folders/[FOLDER_ID]"
// Projects listed in the project_ids field are added to this list.
ResourceNames []string `json:"resourceNames,omitempty"`
// ForceSendFields is a list of field names (e.g. "Filter") to
@ -625,11 +623,13 @@ type LogEntry struct {
// with this log entry, if applicable.
HttpRequest *HttpRequest `json:"httpRequest,omitempty"`
// InsertId: Optional. A unique ID for the log entry. If you provide
// this field, the logging service considers other log entries in the
// same project with the same ID as duplicates which can be removed. If
// omitted, Stackdriver Logging will generate a unique ID for this log
// entry.
// InsertId: Optional. A unique identifier for the log entry. If you
// provide a value, then Stackdriver Logging considers other log entries
// in the same project, with the same timestamp, and with the same
// insert_id to be duplicates which can be removed. If omitted in new
// log entries, then Stackdriver Logging will insert its own unique
// identifier. The insert_id is used to order log entries that have the
// same timestamp value.
InsertId string `json:"insertId,omitempty"`
// JsonPayload: The log entry payload, represented as a structure that
@ -646,6 +646,9 @@ type LogEntry struct {
// "projects/[PROJECT_ID]/logs/[LOG_ID]"
// "organizations/[ORGANIZ
// ATION_ID]/logs/[LOG_ID]"
// "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[L
// OG_ID]"
// "folders/[FOLDER_ID]/logs/[LOG_ID]"
// [LOG_ID] must be URL-encoded within log_name. Example:
// "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa
// ctivity". [LOG_ID] must be less than 512 characters long and can only
@ -700,8 +703,10 @@ type LogEntry struct {
TextPayload string `json:"textPayload,omitempty"`
// Timestamp: Optional. The time the event described by the log entry
// occurred. If omitted, Stackdriver Logging will use the time the log
// entry is received.
// occurred. If omitted in a new log entry, Stackdriver Logging will
// insert the time the log entry is received. Stackdriver Logging might
// reject log entries whose time stamps are more than a couple of hours
// in the future. Log entries with time stamps in the past are accepted.
Timestamp string `json:"timestamp,omitempty"`
// Trace: Optional. Resource name of the trace associated with the log
@ -938,7 +943,7 @@ func (s *LogMetric) MarshalJSON() ([]byte, error) {
// following destinations in any project: a Cloud Storage bucket, a
// BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls
// which log entries are exported. The sink must be created within a
// project or organization.
// project, organization, billing account, or folder.
type LogSink struct {
// Destination: Required. The export
// destination:
@ -968,6 +973,15 @@ type LogSink struct {
//
Filter string `json:"filter,omitempty"`
// IncludeChildren: Optional. This field presently applies only to sinks
// in organizations and folders. If true, then logs from children of
// this entity will also be available to this sink for export. Whether
// particular log entries from the children are exported depends on the
// sink's filter expression. For example, if this sink is associated
// with an organization, then logs from all projects in the organization
// as well as from the organization itself will be available for export.
IncludeChildren bool `json:"includeChildren,omitempty"`
// Name: Required. The client-assigned sink identifier, unique within
// the project. Example: "my-syslog-errors-to-pubsub". Sink identifiers
// are limited to 100 characters and can include only the following
@ -1380,11 +1394,15 @@ func (s *SourceReference) MarshalJSON() ([]byte, error) {
type WriteLogEntriesRequest struct {
// Entries: Required. The log entries to write. Values supplied for the
// fields log_name, resource, and labels in this entries.write request
// are added to those log entries that do not provide their own values
// for the fields.To improve throughput and to avoid exceeding the quota
// limit for calls to entries.write, you should write multiple log
// entries at once rather than calling this method for each individual
// log entry.
// are inserted into those log entries in this list that do not provide
// their own values.Stackdriver Logging also creates and inserts values
// for timestamp and insert_id if the entries do not provide them. The
// created insert_id for the N'th entry in this list will be greater
// than earlier entries and less than later entries. Otherwise, the
// order of log entries in this list does not matter.To improve
// throughput and to avoid exceeding the quota limit for calls to
// entries.write, you should write multiple log entries at once rather
// than calling this method for each individual log entry.
Entries []*LogEntry `json:"entries,omitempty"`
// Labels: Optional. Default labels that are added to the labels field
@ -1399,6 +1417,9 @@ type WriteLogEntriesRequest struct {
// "projects/[PROJECT_ID]/logs/[LOG_ID]"
// "organizations/[ORGANI
// ZATION_ID]/logs/[LOG_ID]"
// "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[
// LOG_ID]"
// "folders/[FOLDER_ID]/logs/[LOG_ID]"
// [LOG_ID] must be URL-encoded. For example,
// "projects/my-project-id/logs/syslog" or
// "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa
@ -1407,10 +1428,10 @@ type WriteLogEntriesRequest struct {
// PartialSuccess: Optional. Whether valid entries should be written
// even if some other entries fail due to INVALID_ARGUMENT or
// PERMISSION_DENIED errors. If any entry is not written, the response
// status will be the error associated with one of the failed entries
// and include error details in the form of
// WriteLogEntriesPartialErrors.
// PERMISSION_DENIED errors. If any entry is not written, then the
// response status is the error associated with one of the failed
// entries and the response includes error details keyed by the entries'
// zero-based index in the entries.write method.
PartialSuccess bool `json:"partialSuccess,omitempty"`
// Resource: Optional. A default monitored resource object that is
@ -1503,7 +1524,6 @@ func (c *BillingAccountsLogsDeleteCall) doRequest(alt string) (*http.Response, e
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}")
@ -1563,7 +1583,7 @@ func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty
// ],
// "parameters": {
// "logName": {
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+/logs/[^/]+$",
// "required": true,
@ -1593,8 +1613,8 @@ type BillingAccountsLogsListCall struct {
header_ http.Header
}
// List: Lists the logs in projects or organizations. Only logs that
// have entries are listed.
// List: Lists the logs in projects, organizations, folders, or billing
// accounts. Only logs that have entries are listed.
func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall {
c := &BillingAccountsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
@ -1661,7 +1681,6 @@ func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, err
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -1715,7 +1734,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog
}
return ret, nil
// {
// "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.",
// "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.",
// "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs",
// "httpMethod": "GET",
// "id": "logging.billingAccounts.logs.list",
@ -1735,7 +1754,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog
// "type": "string"
// },
// "parent": {
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n",
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+$",
// "required": true,
@ -1827,7 +1846,6 @@ func (c *EntriesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.listlogentriesrequest)
if err != nil {
@ -1934,8 +1952,7 @@ type EntriesWriteCall struct {
header_ http.Header
}
// Write: Writes log entries to Stackdriver Logging. All log entries are
// written by this method.
// Write: Writes log entries to Stackdriver Logging.
func (r *EntriesService) Write(writelogentriesrequest *WriteLogEntriesRequest) *EntriesWriteCall {
c := &EntriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.writelogentriesrequest = writelogentriesrequest
@ -1973,7 +1990,6 @@ func (c *EntriesWriteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest)
if err != nil {
@ -2026,7 +2042,7 @@ func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesRes
}
return ret, nil
// {
// "description": "Writes log entries to Stackdriver Logging. All log entries are written by this method.",
// "description": "Writes log entries to Stackdriver Logging.",
// "flatPath": "v2beta1/entries:write",
// "httpMethod": "POST",
// "id": "logging.entries.write",
@ -2125,7 +2141,6 @@ func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Resp
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2281,7 +2296,6 @@ func (c *OrganizationsLogsDeleteCall) doRequest(alt string) (*http.Response, err
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}")
@ -2341,7 +2355,7 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty,
// ],
// "parameters": {
// "logName": {
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "location": "path",
// "pattern": "^organizations/[^/]+/logs/[^/]+$",
// "required": true,
@ -2371,8 +2385,8 @@ type OrganizationsLogsListCall struct {
header_ http.Header
}
// List: Lists the logs in projects or organizations. Only logs that
// have entries are listed.
// List: Lists the logs in projects, organizations, folders, or billing
// accounts. Only logs that have entries are listed.
func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall {
c := &OrganizationsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
@ -2439,7 +2453,6 @@ func (c *OrganizationsLogsListCall) doRequest(alt string) (*http.Response, error
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2493,7 +2506,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR
}
return ret, nil
// {
// "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.",
// "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.",
// "flatPath": "v2beta1/organizations/{organizationsId}/logs",
// "httpMethod": "GET",
// "id": "logging.organizations.logs.list",
@ -2513,7 +2526,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR
// "type": "string"
// },
// "parent": {
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n",
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^organizations/[^/]+$",
// "required": true,
@ -2605,7 +2618,6 @@ func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}")
@ -2665,7 +2677,7 @@ func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error
// ],
// "parameters": {
// "logName": {
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "location": "path",
// "pattern": "^projects/[^/]+/logs/[^/]+$",
// "required": true,
@ -2695,8 +2707,8 @@ type ProjectsLogsListCall struct {
header_ http.Header
}
// List: Lists the logs in projects or organizations. Only logs that
// have entries are listed.
// List: Lists the logs in projects, organizations, folders, or billing
// accounts. Only logs that have entries are listed.
func (r *ProjectsLogsService) List(parent string) *ProjectsLogsListCall {
c := &ProjectsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
@ -2763,7 +2775,6 @@ func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2817,7 +2828,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon
}
return ret, nil
// {
// "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.",
// "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.",
// "flatPath": "v2beta1/projects/{projectsId}/logs",
// "httpMethod": "GET",
// "id": "logging.projects.logs.list",
@ -2837,7 +2848,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon
// "type": "string"
// },
// "parent": {
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n",
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
@ -2929,7 +2940,6 @@ func (c *ProjectsMetricsCreateCall) doRequest(alt string) (*http.Response, error
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric)
if err != nil {
@ -3065,7 +3075,6 @@ func (c *ProjectsMetricsDeleteCall) doRequest(alt string) (*http.Response, error
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+metricName}")
@ -3204,7 +3213,6 @@ func (c *ProjectsMetricsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -3366,7 +3374,6 @@ func (c *ProjectsMetricsListCall) doRequest(alt string) (*http.Response, error)
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -3532,7 +3539,6 @@ func (c *ProjectsMetricsUpdateCall) doRequest(alt string) (*http.Response, error
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric)
if err != nil {
@ -3648,8 +3654,8 @@ func (r *ProjectsSinksService) Create(parent string, logsink *LogSink) *Projects
// "uniqueWriterIdentity": Determines the kind of IAM identity returned
// as writer_identity in the new sink. If this value is omitted or set
// to false, and if the sink's parent is a project, then the value
// returned as writer_identity is cloud-logs@system.gserviceaccount.com,
// the same identity used before the addition of writer identities to
// returned as writer_identity is the same group or service account used
// by Stackdriver Logging before the addition of writer identities to
// this API. The sink's destination must be in the same project as the
// sink itself.If this field is set to true, or if the sink is owned by
// a non-project resource such as an organization, then the value of
@ -3692,7 +3698,6 @@ func (c *ProjectsSinksCreateCall) doRequest(alt string) (*http.Response, error)
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
@ -3757,14 +3762,14 @@ func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, er
// ],
// "parameters": {
// "parent": {
// "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".",
// "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@system.gserviceaccount.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.",
// "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.",
// "location": "query",
// "type": "boolean"
// }
@ -3833,7 +3838,6 @@ func (c *ProjectsSinksDeleteCall) doRequest(alt string) (*http.Response, error)
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+sinkName}")
@ -3893,7 +3897,7 @@ func (c *ProjectsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, erro
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.",
// "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^projects/[^/]+/sinks/[^/]+$",
// "required": true,
@ -3971,7 +3975,6 @@ func (c *ProjectsSinksGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -4034,7 +4037,7 @@ func (c *ProjectsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^projects/[^/]+/sinks/[^/]+$",
// "required": true,
@ -4133,7 +4136,6 @@ func (c *ProjectsSinksListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -4207,7 +4209,7 @@ func (c *ProjectsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResp
// "type": "string"
// },
// "parent": {
// "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".",
// "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
@ -4281,9 +4283,9 @@ func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *Proj
// values of this field:
// If the old and new values of this field are both false or both true,
// then there is no change to the sink's writer_identity.
// If the old value was false and the new value is true, then
// If the old value is false and the new value is true, then
// writer_identity is changed to a unique service account.
// It is an error if the old value was true and the new value is false.
// It is an error if the old value is true and the new value is false.
func (c *ProjectsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksUpdateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
@ -4320,7 +4322,6 @@ func (c *ProjectsSinksUpdateCall) doRequest(alt string) (*http.Response, error)
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
@ -4385,14 +4386,14 @@ func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, er
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^projects/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.",
// "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.",
// "location": "query",
// "type": "boolean"
// }
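// Illustrative sketch, not part of the generated client: one way a caller might use
// the uniqueWriterIdentity parameter described above when updating a sink. The
// Projects.Sinks accessor is assumed to follow this package's usual accessor pattern,
// and the LogSink argument is left to the caller because its fields are not shown in
// this diff; the sink name is the example from the description above.
func updateSinkWithUniqueWriter(svc *Service, sink *LogSink) error {
	// Per the description above: if the old and new values are both false or both
	// true there is no change to writer_identity; false -> true switches it to a
	// unique service account; true -> false is an error.
	_, err := svc.Projects.Sinks.Update("projects/my-project-id/sinks/my-sink-id", sink).
		UniqueWriterIdentity(true).
		Do()
	return err
}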

File diff suppressed because it is too large

View File

@ -72,10 +72,9 @@ func New(client *http.Client) (*Service, error) {
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
GoogleClientHeaderElement string // client header fragment, for Google use only
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Projects *ProjectsService
}
@ -87,10 +86,6 @@ func (s *Service) userAgent() string {
return googleapi.UserAgent + " " + s.UserAgent
}
func (s *Service) clientHeader() string {
return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}
func NewProjectsService(s *Service) *ProjectsService {
rs := &ProjectsService{s: s}
rs.CollectdTimeSeries = NewProjectsCollectdTimeSeriesService(s)
@ -172,25 +167,21 @@ type ProjectsTimeSeriesService struct {
s *Service
}
// BucketOptions: A Distribution may optionally contain a histogram of
// the values in the population. The histogram is given in bucket_counts
// as counts of values that fall into one of a sequence of
// non-overlapping buckets. The sequence of buckets is described by
// bucket_options.A bucket specifies an inclusive lower bound and
// BucketOptions: BucketOptions describes the bucket boundaries used to
// create a histogram for the distribution. The buckets can be in a
// linear sequence, an exponential sequence, or each bucket can be
// specified explicitly. BucketOptions does not include the number of
// values in each bucket.A bucket has an inclusive lower bound and
// exclusive upper bound for the values that are counted for that
// bucket. The upper bound of a bucket is strictly greater than the
// lower bound.The sequence of N buckets for a Distribution consists of
// bucket. The upper bound of a bucket must be strictly greater than the
// lower bound. The sequence of N buckets for a distribution consists of
// an underflow bucket (number 0), zero or more finite buckets (number 1
// through N - 2) and an overflow bucket (number N - 1). The buckets are
// contiguous: the lower bound of bucket i (i > 0) is the same as the
// upper bound of bucket i - 1. The buckets span the whole range of
// finite values: lower bound of the underflow bucket is -infinity and
// the upper bound of the overflow bucket is +infinity. The finite
// buckets are so-called because both bounds are finite.BucketOptions
// describes bucket boundaries in one of three ways. Two describe the
// boundaries by giving parameters for a formula to generate boundaries
// and one gives the bucket boundaries explicitly.If bucket_options is
// not given, then no bucket_counts may be given.
// buckets are so-called because both bounds are finite.
type BucketOptions struct {
// ExplicitBuckets: The explicit buckets.
ExplicitBuckets *Explicit `json:"explicitBuckets,omitempty"`
@ -399,34 +390,36 @@ func (s *CreateTimeSeriesRequest) MarshalJSON() ([]byte, error) {
}
// Distribution: Distribution contains summary statistics for a
// population of values and, optionally, a histogram representing the
// distribution of those values across a specified set of histogram
// buckets.The summary statistics are the count, mean, sum of the
// squared deviation from the mean, the minimum, and the maximum of the
// set of population of values.The histogram is based on a sequence of
// buckets and gives a count of values that fall into each bucket. The
// boundaries of the buckets are given either explicitly or by
// specifying parameters for a method of computing them (buckets of
// fixed width or buckets of exponentially increasing width).Although it
// is not forbidden, it is generally a bad idea to include non-finite
// values (infinities or NaNs) in the population of values, as this will
// render the mean and sum_of_squared_deviation fields meaningless.
// population of values. It optionally contains a histogram representing
// the distribution of those values across a set of buckets.The summary
// statistics are the count, mean, sum of the squared deviation from the
// mean, the minimum, and the maximum of the set of population of
// values. The histogram is based on a sequence of buckets and gives a
// count of values that fall into each bucket. The boundaries of the
// buckets are given either explicitly or by formulas for buckets of
// fixed or exponentially increasing widths.Although it is not
// forbidden, it is generally a bad idea to include non-finite values
// (infinities or NaNs) in the population of values, as this will render
// the mean and sum_of_squared_deviation fields meaningless.
type Distribution struct {
// BucketCounts: If bucket_options is given, then the sum of the values
// in bucket_counts must equal the value in count. If bucket_options is
// not given, no bucket_counts fields may be given.Bucket counts are
// given in order under the numbering scheme described above (the
// underflow bucket has number 0; the finite buckets, if any, have
// numbers 1 through N-2; the overflow bucket has number N-1).The size
// of bucket_counts must be no greater than N as defined in
// bucket_options.Any suffix of trailing zero bucket_count fields may be
// omitted.
// BucketCounts: Required in the Stackdriver Monitoring API v3. The
// values for each bucket specified in bucket_options. The sum of the
// values in bucketCounts must equal the value in the count field of the
// Distribution object. The order of the bucket counts follows the
// numbering schemes described for the three bucket types. The underflow
// bucket has number 0; the finite buckets, if any, have numbers 1
// through N-2; and the overflow bucket has number N-1. The size of
// bucket_counts must not be greater than N. If the size is less than N,
// then the remaining buckets are assigned values of zero.
BucketCounts googleapi.Int64s `json:"bucketCounts,omitempty"`
// BucketOptions: Defines the histogram bucket boundaries.
// BucketOptions: Required in the Stackdriver Monitoring API v3. Defines
// the histogram bucket boundaries.
BucketOptions *BucketOptions `json:"bucketOptions,omitempty"`
// Count: The number of values in the population. Must be non-negative.
// This value must equal the sum of the values in bucket_counts if a
// histogram is provided.
Count int64 `json:"count,omitempty,string"`
// Mean: The arithmetic mean of the values in the population. If count
@ -501,12 +494,13 @@ type Empty struct {
googleapi.ServerResponse `json:"-"`
}
// Explicit: A set of buckets with arbitrary widths.Defines size(bounds)
// + 1 (= N) buckets with these boundaries for bucket i:Upper bound (0
// <= i < N-1): boundsi Lower bound (1 <= i < N); boundsi - 1There must
// be at least one element in bounds. If bounds has only one element,
// there are no finite buckets, and that single element is the common
// boundary of the overflow and underflow buckets.
// Explicit: Specifies a set of buckets with arbitrary widths.There are
// size(bounds) + 1 (= N) buckets. Bucket i has the following
// boundaries:Upper bound (0 <= i < N-1): boundsi Lower bound (1 <= i <
// N); boundsi - 1The bounds field must contain at least one element. If
// bounds has only one element, then there are no finite buckets, and
// that single element is the common boundary of the overflow and
// underflow buckets.
type Explicit struct {
// Bounds: The values must be monotonically increasing.
Bounds []float64 `json:"bounds,omitempty"`
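// Illustrative sketch, not part of the generated client (the variable name is
// hypothetical): a Distribution using the Explicit bucket scheme described above.
// With Bounds of [1, 5, 10] there are size(bounds)+1 = 4 buckets: underflow (< 1),
// finite buckets [1, 5) and [5, 10), and overflow (>= 10). BucketCounts follows that
// numbering and sums to Count.
var exampleDistribution = &Distribution{
	Count: 10,
	BucketOptions: &BucketOptions{
		ExplicitBuckets: &Explicit{Bounds: []float64{1, 5, 10}},
	},
	// bucket 0 (underflow), buckets 1-2 (finite), bucket 3 (overflow)
	BucketCounts: googleapi.Int64s{1, 4, 3, 2},
}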
@ -534,11 +528,11 @@ func (s *Explicit) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Exponential: Specify a sequence of buckets that have a width that is
// proportional to the value of the lower bound. Each bucket represents
// a constant relative uncertainty on a specific value in the
// bucket.Defines num_finite_buckets + 2 (= N) buckets with these
// boundaries for bucket i:Upper bound (0 <= i < N-1): scale *
// Exponential: Specifies an exponential sequence of buckets that have a
// width that is proportional to the value of the lower bound. Each
// bucket represents a constant relative uncertainty on a specific value
// in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket
// i has the following boundaries:Upper bound (0 <= i < N-1): scale *
// (growth_factor ^ i). Lower bound (1 <= i < N): scale *
// (growth_factor ^ (i - 1)).
type Exponential struct {
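// Illustrative sketch, not part of the generated client (helper name is hypothetical;
// assumes importing "math"): the Exponential boundary formula quoted above. For
// scale=1, growthFactor=2 and numFiniteBuckets=3 the finite buckets are [1, 2),
// [2, 4) and [4, 8), with underflow (< 1) and overflow (>= 8) buckets on either side.
func exponentialUpperBound(scale, growthFactor float64, i int) float64 {
	// Upper bound of bucket i (0 <= i < N-1): scale * (growth_factor ^ i).
	return scale * math.Pow(growthFactor, float64(i))
}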
@ -793,12 +787,12 @@ func (s *LabelDescriptor) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Linear: Specify a sequence of buckets that all have the same width
// (except overflow and underflow). Each bucket represents a constant
// absolute uncertainty on the specific value in the bucket.Defines
// num_finite_buckets + 2 (= N) buckets with these boundaries for bucket
// i:Upper bound (0 <= i < N-1): offset + (width * i). Lower bound (1
// <= i < N): offset + (width * (i - 1)).
// Linear: Specifies a linear sequence of buckets that all have the same
// width (except overflow and underflow). Each bucket represents a
// constant absolute uncertainty on the specific value in the
// bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has
// the following boundaries:Upper bound (0 <= i < N-1): offset + (width
// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)).
type Linear struct {
// NumFiniteBuckets: Must be greater than 0.
NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"`
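// Illustrative sketch, not part of the generated client (helper name is hypothetical):
// the Linear boundary formula quoted above. For offset=0, width=10 and
// numFiniteBuckets=3 the finite buckets are [0, 10), [10, 20) and [20, 30), with
// underflow (< 0) and overflow (>= 30) buckets on either side.
func linearUpperBound(offset, width float64, i int) float64 {
	// Upper bound of bucket i (0 <= i < N-1): offset + (width * i).
	return offset + width*float64(i)
}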
@ -966,7 +960,7 @@ func (s *ListMetricDescriptorsResponse) MarshalJSON() ([]byte, error) {
}
// ListMonitoredResourceDescriptorsResponse: The
// ListMonitoredResourcDescriptors response.
// ListMonitoredResourceDescriptors response.
type ListMonitoredResourceDescriptorsResponse struct {
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
@ -1244,13 +1238,13 @@ func (s *MetricDescriptor) MarshalJSON() ([]byte, error) {
//
type MonitoredResource struct {
// Labels: Required. Values for all of the labels listed in the
// associated monitored resource descriptor. For example, Cloud SQL
// databases use the labels "database_id" and "zone".
// associated monitored resource descriptor. For example, Compute Engine
// VM instances use the labels "project_id", "instance_id", and "zone".
Labels map[string]string `json:"labels,omitempty"`
// Type: Required. The monitored resource type. This field must match
// the type field of a MonitoredResourceDescriptor object. For example,
// the type of a Cloud SQL database is "cloudsql_database".
// the type of a Compute Engine VM instance is gce_instance.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Labels") to
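// Illustrative sketch, not part of the generated client (the variable name and label
// values are placeholders): a MonitoredResource for a Compute Engine VM instance,
// using the resource type and label names from the comments above.
var exampleResource = &MonitoredResource{
	Type: "gce_instance",
	Labels: map[string]string{
		"project_id":  "my-project",
		"instance_id": "1234567890123456789",
		"zone":        "us-central1-f",
	},
}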
@ -1571,8 +1565,8 @@ type TimeSeries struct {
// type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION.
Points []*Point `json:"points,omitempty"`
// Resource: The associated resource. A fully-specified monitored
// resource used to identify the time series.
// Resource: The associated monitored resource. Custom metrics can use
// only certain monitored resource types in their time series data.
Resource *MonitoredResource `json:"resource,omitempty"`
// ValueType: The value type of the time series. When listing time
@ -1777,7 +1771,6 @@ func (c *ProjectsCollectdTimeSeriesCreateCall) doRequest(alt string) (*http.Resp
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createcollectdtimeseriesrequest)
if err != nil {
@ -1922,7 +1915,6 @@ func (c *ProjectsGroupsCreateCall) doRequest(alt string) (*http.Response, error)
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.group)
if err != nil {
@ -2062,7 +2054,6 @@ func (c *ProjectsGroupsDeleteCall) doRequest(alt string) (*http.Response, error)
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
@ -2200,7 +2191,6 @@ func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2388,7 +2378,6 @@ func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2576,7 +2565,6 @@ func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error)
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.group)
if err != nil {
@ -2770,7 +2758,6 @@ func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, e
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -2953,7 +2940,6 @@ func (c *ProjectsMetricDescriptorsCreateCall) doRequest(alt string) (*http.Respo
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor)
if err != nil {
@ -3090,7 +3076,6 @@ func (c *ProjectsMetricDescriptorsDeleteCall) doRequest(alt string) (*http.Respo
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
@ -3229,7 +3214,6 @@ func (c *ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -3400,7 +3384,6 @@ func (c *ProjectsMetricDescriptorsListCall) doRequest(alt string) (*http.Respons
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -3581,7 +3564,6 @@ func (c *ProjectsMonitoredResourceDescriptorsGetCall) doRequest(alt string) (*ht
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -3752,7 +3734,6 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*h
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -3928,7 +3909,6 @@ func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, er
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtimeseriesrequest)
if err != nil {
@ -4186,6 +4166,103 @@ func (c *ProjectsTimeSeriesListCall) PageToken(pageToken string) *ProjectsTimeSe
return c
}
// SecondaryAggregationAlignmentPeriod sets the optional parameter
// "secondaryAggregation.alignmentPeriod": The alignment period for
// per-time series alignment. If present, alignmentPeriod must be at
// least 60 seconds. After per-time series alignment, each time series
// will contain data points only on the period boundaries. If
// perSeriesAligner is not specified or equals ALIGN_NONE, then this
// field is ignored. If perSeriesAligner is specified and does not equal
// ALIGN_NONE, then this field must be defined; otherwise an error is
// returned.
func (c *ProjectsTimeSeriesListCall) SecondaryAggregationAlignmentPeriod(secondaryAggregationAlignmentPeriod string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("secondaryAggregation.alignmentPeriod", secondaryAggregationAlignmentPeriod)
return c
}
// SecondaryAggregationCrossSeriesReducer sets the optional parameter
// "secondaryAggregation.crossSeriesReducer": The approach to be used to
// combine time series. Not all reducer functions may be applied to all
// time series, depending on the metric type and the value type of the
// original time series. Reduction may change the metric type or value
// type of the time series.Time series data must be aligned in order to
// perform cross-time series reduction. If crossSeriesReducer is
// specified, then perSeriesAligner must be specified and not equal
// ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error
// is returned.
//
// Possible values:
// "REDUCE_NONE"
// "REDUCE_MEAN"
// "REDUCE_MIN"
// "REDUCE_MAX"
// "REDUCE_SUM"
// "REDUCE_STDDEV"
// "REDUCE_COUNT"
// "REDUCE_COUNT_TRUE"
// "REDUCE_FRACTION_TRUE"
// "REDUCE_PERCENTILE_99"
// "REDUCE_PERCENTILE_95"
// "REDUCE_PERCENTILE_50"
// "REDUCE_PERCENTILE_05"
func (c *ProjectsTimeSeriesListCall) SecondaryAggregationCrossSeriesReducer(secondaryAggregationCrossSeriesReducer string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("secondaryAggregation.crossSeriesReducer", secondaryAggregationCrossSeriesReducer)
return c
}
// SecondaryAggregationGroupByFields sets the optional parameter
// "secondaryAggregation.groupByFields": The set of fields to preserve
// when crossSeriesReducer is specified. The groupByFields determine how
// the time series are partitioned into subsets prior to applying the
// aggregation function. Each subset contains time series that have the
// same value for each of the grouping fields. Each individual time
// series is a member of exactly one subset. The crossSeriesReducer is
// applied to each subset of time series. It is not possible to reduce
// across different resource types, so this field implicitly contains
// resource.type. Fields not specified in groupByFields are aggregated
// away. If groupByFields is not specified and all the time series have
// the same resource type, then the time series are aggregated into a
// single output time series. If crossSeriesReducer is not defined, this
// field is ignored.
func (c *ProjectsTimeSeriesListCall) SecondaryAggregationGroupByFields(secondaryAggregationGroupByFields ...string) *ProjectsTimeSeriesListCall {
c.urlParams_.SetMulti("secondaryAggregation.groupByFields", append([]string{}, secondaryAggregationGroupByFields...))
return c
}
// SecondaryAggregationPerSeriesAligner sets the optional parameter
// "secondaryAggregation.perSeriesAligner": The approach to be used to
// align individual time series. Not all alignment functions may be
// applied to all time series, depending on the metric type and value
// type of the original time series. Alignment may change the metric
// type or the value type of the time series.Time series data must be
// aligned in order to perform cross-time series reduction. If
// crossSeriesReducer is specified, then perSeriesAligner must be
// specified and not equal ALIGN_NONE and alignmentPeriod must be
// specified; otherwise, an error is returned.
//
// Possible values:
// "ALIGN_NONE"
// "ALIGN_DELTA"
// "ALIGN_RATE"
// "ALIGN_INTERPOLATE"
// "ALIGN_NEXT_OLDER"
// "ALIGN_MIN"
// "ALIGN_MAX"
// "ALIGN_MEAN"
// "ALIGN_COUNT"
// "ALIGN_SUM"
// "ALIGN_STDDEV"
// "ALIGN_COUNT_TRUE"
// "ALIGN_FRACTION_TRUE"
// "ALIGN_PERCENTILE_99"
// "ALIGN_PERCENTILE_95"
// "ALIGN_PERCENTILE_50"
// "ALIGN_PERCENTILE_05"
func (c *ProjectsTimeSeriesListCall) SecondaryAggregationPerSeriesAligner(secondaryAggregationPerSeriesAligner string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("secondaryAggregation.perSeriesAligner", secondaryAggregationPerSeriesAligner)
return c
}
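// Illustrative sketch, not part of the generated client: chaining the
// secondaryAggregation setters added above onto a time-series list call. The
// Projects.TimeSeries.List accessor is assumed to follow this package's usual
// pattern, and the grouping field is a placeholder; only the new secondary
// aggregation setters are shown here.
func listWithSecondaryAggregation(svc *Service, name string) error {
	_, err := svc.Projects.TimeSeries.List(name).
		// Must be at least 60 seconds, in google-duration format.
		SecondaryAggregationAlignmentPeriod("300s").
		// A reducer requires a non-ALIGN_NONE aligner and an alignment period.
		SecondaryAggregationPerSeriesAligner("ALIGN_MEAN").
		SecondaryAggregationCrossSeriesReducer("REDUCE_SUM").
		SecondaryAggregationGroupByFields("resource.zone").
		Do()
	return err
}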
// View sets the optional parameter "view": Specifies which information
// is returned about the time series.
//
@ -4238,7 +4315,6 @@ func (c *ProjectsTimeSeriesListCall) doRequest(alt string) (*http.Response, erro
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
@ -4396,6 +4472,62 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime
// "location": "query",
// "type": "string"
// },
// "secondaryAggregation.alignmentPeriod": {
// "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.",
// "format": "google-duration",
// "location": "query",
// "type": "string"
// },
// "secondaryAggregation.crossSeriesReducer": {
// "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.",
// "enum": [
// "REDUCE_NONE",
// "REDUCE_MEAN",
// "REDUCE_MIN",
// "REDUCE_MAX",
// "REDUCE_SUM",
// "REDUCE_STDDEV",
// "REDUCE_COUNT",
// "REDUCE_COUNT_TRUE",
// "REDUCE_FRACTION_TRUE",
// "REDUCE_PERCENTILE_99",
// "REDUCE_PERCENTILE_95",
// "REDUCE_PERCENTILE_50",
// "REDUCE_PERCENTILE_05"
// ],
// "location": "query",
// "type": "string"
// },
// "secondaryAggregation.groupByFields": {
// "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.",
// "location": "query",
// "repeated": true,
// "type": "string"
// },
// "secondaryAggregation.perSeriesAligner": {
// "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.",
// "enum": [
// "ALIGN_NONE",
// "ALIGN_DELTA",
// "ALIGN_RATE",
// "ALIGN_INTERPOLATE",
// "ALIGN_NEXT_OLDER",
// "ALIGN_MIN",
// "ALIGN_MAX",
// "ALIGN_MEAN",
// "ALIGN_COUNT",
// "ALIGN_SUM",
// "ALIGN_STDDEV",
// "ALIGN_COUNT_TRUE",
// "ALIGN_FRACTION_TRUE",
// "ALIGN_PERCENTILE_99",
// "ALIGN_PERCENTILE_95",
// "ALIGN_PERCENTILE_50",
// "ALIGN_PERCENTILE_05"
// ],
// "location": "query",
// "type": "string"
// },
// "view": {
// "description": "Specifies which information is returned about the time series.",
// "enum": [