author     yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com>  2021-09-28 00:22:30 -0700
committer  GitHub <noreply@github.com>  2021-09-28 07:22:30 +0000
commit     ad81f3da411eb38e48137cb67f22a718498bcdc1 (patch)
tree       175dea39f30597221280126deb48deb3cddc6960
parent     95be169f05cb7d5559dbdf4ae907fbf4f558273c (diff)
download   google-api-python-client-ad81f3da411eb38e48137cb67f22a718498bcdc1.tar.gz
chore: Update discovery artifacts (#1541)
## Discovery Artifact Change Summary:

feat(analyticsadmin): update the api https://github.com/googleapis/google-api-python-client/commit/c14c42a82fbd61df00b690daa328cea212441f59
feat(appengine): update the api https://github.com/googleapis/google-api-python-client/commit/22e6b63271836d2b195191c0711d3e815d7b3f29
feat(bigquery): update the api https://github.com/googleapis/google-api-python-client/commit/5325b3654e42e393911f088e9a8358aeaf733c03
feat(content): update the api https://github.com/googleapis/google-api-python-client/commit/df08fb1f3823a5edc96e6caebe24df66e943fa36
feat(dialogflow): update the api https://github.com/googleapis/google-api-python-client/commit/eaa0b250682d593572168427d92b0c3b9438a503
feat(firestore): update the api https://github.com/googleapis/google-api-python-client/commit/89ee485ce0646fb14d4f4e1d7aae095e504cf4be
feat(gkehub): update the api https://github.com/googleapis/google-api-python-client/commit/982014c5e33c29f2e0030b950b2f2ac27afa3f8f
feat(monitoring): update the api https://github.com/googleapis/google-api-python-client/commit/440201ddeeae876ab83863def611ec39649d397c
fix(oslogin): update the api https://github.com/googleapis/google-api-python-client/commit/e940d95d04a6aba60b89ece3fd630cc0ab5cde2a
feat(retail): update the api https://github.com/googleapis/google-api-python-client/commit/58f1c1ba076ed6ecc389ddf66d0c5ac609cd9d17
feat(servicenetworking): update the api https://github.com/googleapis/google-api-python-client/commit/53d51411d39049a98df6909ae16f9c5dfee4f432
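The artifacts touched below are the discovery documents that `googleapiclient.discovery.build()` consumes. A minimal sketch of how one of the updated documents is loaded, using `analyticsadmin` purely as an example from the list above (credentials and scopes are assumed to be configured separately):

```python
from googleapiclient.discovery import build

# With static discovery (the v2 default), build() reads the bundled JSON under
# googleapiclient/discovery_cache/documents/ instead of fetching it over HTTP,
# so this commit directly changes the client surface constructed here.
service = build("analyticsadmin", "v1alpha", static_discovery=True)

# Example call against the updated surface; requires Application Default
# Credentials with suitable Analytics Admin scopes.
response = service.accountSummaries().list().execute()
for summary in response.get("accountSummaries", []):
    print(summary.get("displayName"))
```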
-rw-r--r--  docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html | 12
-rw-r--r--  docs/dyn/analyticsadmin_v1alpha.accounts.html | 2
-rw-r--r--  docs/dyn/analyticsadmin_v1alpha.properties.html | 7
-rw-r--r--  docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html | 14
-rw-r--r--  docs/dyn/apigee_v1.organizations.html | 8
-rw-r--r--  docs/dyn/appengine_v1.apps.services.html | 9
-rw-r--r--  docs/dyn/appengine_v1beta.apps.services.html | 9
-rw-r--r--  docs/dyn/area120tables_v1alpha1.tables.html | 2
-rw-r--r--  docs/dyn/area120tables_v1alpha1.workspaces.html | 2
-rw-r--r--  docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html | 10
-rw-r--r--  docs/dyn/bigquery_v2.jobs.html | 8
-rw-r--r--  docs/dyn/bigquery_v2.models.html | 32
-rw-r--r--  docs/dyn/clouderrorreporting_v1beta1.projects.events.html | 2
-rw-r--r--  docs/dyn/cloudidentity_v1.groups.html | 10
-rw-r--r--  docs/dyn/cloudidentity_v1beta1.groups.html | 10
-rw-r--r--  docs/dyn/composer_v1.projects.locations.environments.html | 154
-rw-r--r--  docs/dyn/composer_v1beta1.projects.locations.environments.html | 32
-rw-r--r--  docs/dyn/container_v1.projects.locations.clusters.html | 16
-rw-r--r--  docs/dyn/container_v1.projects.locations.clusters.nodePools.html | 16
-rw-r--r--  docs/dyn/container_v1.projects.zones.clusters.html | 16
-rw-r--r--  docs/dyn/container_v1.projects.zones.clusters.nodePools.html | 16
-rw-r--r--  docs/dyn/container_v1beta1.projects.locations.clusters.html | 16
-rw-r--r--  docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html | 16
-rw-r--r--  docs/dyn/container_v1beta1.projects.zones.clusters.html | 16
-rw-r--r--  docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html | 16
-rw-r--r--  docs/dyn/content_v2_1.accounts.html | 19
-rw-r--r--  docs/dyn/content_v2_1.localinventory.html | 6
-rw-r--r--  docs/dyn/content_v2_1.orders.html | 14
-rw-r--r--  docs/dyn/content_v2_1.promotions.html | 12
-rw-r--r--  docs/dyn/datastore_v1.projects.html | 286
-rw-r--r--  docs/dyn/datastore_v1beta3.projects.html | 286
-rw-r--r--  docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html | 4
-rw-r--r--  docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html | 4
-rw-r--r--  docs/dyn/dialogflow_v3.projects.locations.agents.environments.deployments.html | 174
-rw-r--r--  docs/dyn/dialogflow_v3.projects.locations.agents.environments.html | 85
-rw-r--r--  docs/dyn/dialogflow_v3.projects.locations.securitySettings.html | 14
-rw-r--r--  docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.deployments.html | 174
-rw-r--r--  docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html | 85
-rw-r--r--  docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html | 14
-rw-r--r--  docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html | 4
-rw-r--r--  docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html | 4
-rw-r--r--  docs/dyn/drive_v2.files.html | 8
-rw-r--r--  docs/dyn/drive_v3.files.html | 4
-rw-r--r--  docs/dyn/firestore_v1.projects.databases.html | 106
-rw-r--r--  docs/dyn/fitness_v1.users.dataset.html | 2
-rw-r--r--  docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html | 6
-rw-r--r--  docs/dyn/gkehub_v1.projects.locations.features.html | 112
-rw-r--r--  docs/dyn/gkehub_v1alpha.projects.locations.features.html | 112
-rw-r--r--  docs/dyn/gkehub_v1beta.projects.locations.features.html | 112
-rw-r--r--  docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html | 12
-rw-r--r--  docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html | 12
-rw-r--r--  docs/dyn/iam_v1.projects.serviceAccounts.keys.html | 8
-rw-r--r--  docs/dyn/metastore_v1alpha.projects.locations.services.backups.html | 6
-rw-r--r--  docs/dyn/metastore_v1alpha.projects.locations.services.html | 8
-rw-r--r--  docs/dyn/metastore_v1beta.projects.locations.services.backups.html | 6
-rw-r--r--  docs/dyn/metastore_v1beta.projects.locations.services.html | 8
-rw-r--r--  docs/dyn/monitoring_v1.projects.dashboards.html | 2064
-rw-r--r--  docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html | 2
-rw-r--r--  docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html | 36
-rw-r--r--  docs/dyn/retail_v2.projects.locations.catalogs.placements.html | 8
-rw-r--r--  docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html | 12
-rw-r--r--  docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html | 36
-rw-r--r--  docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html | 8
-rw-r--r--  docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html | 12
-rw-r--r--  docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html | 36
-rw-r--r--  docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html | 8
-rw-r--r--  docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html | 12
-rw-r--r--  docs/dyn/servicenetworking_v1.services.html | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/admin.directory_v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/admin.reports_v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/admob.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/admob.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/adsense.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/androidenterprise.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/androidpublisher.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/apigateway.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/apigateway.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/apigee.v1.json | 3
-rw-r--r--  googleapiclient/discovery_cache/documents/apikeys.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/appengine.v1.json | 9
-rw-r--r--  googleapiclient/discovery_cache/documents/appengine.v1alpha.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/appengine.v1beta.json | 9
-rw-r--r--  googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/artifactregistry.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/assuredworkloads.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/bigquery.v2.json | 87
-rw-r--r--  googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/billingbudgets.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/binaryauthorization.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/blogger.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/blogger.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/books.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/calendar.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/chat.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/chromemanagement.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/chromepolicy.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/chromeuxreport.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/classroom.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudasset.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudbilling.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudchannel.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/clouddebugger.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudfunctions.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudidentity.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudiot.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudkms.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudprofiler.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudscheduler.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudshell.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudtrace.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudtrace.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/composer.v1.json | 48
-rw-r--r--  googleapiclient/discovery_cache/documents/composer.v1beta1.json | 10
-rw-r--r--  googleapiclient/discovery_cache/documents/compute.alpha.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/compute.beta.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/compute.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/container.v1.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/container.v1beta1.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/content.v2.1.json | 43
-rw-r--r--  googleapiclient/discovery_cache/documents/content.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/customsearch.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datacatalog.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datamigration.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datamigration.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/dataproc.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datastore.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datastore.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/datastore.v1beta3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/deploymentmanager.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/dialogflow.v2.json | 198
-rw-r--r--  googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json | 217
-rw-r--r--  googleapiclient/discovery_cache/documents/dialogflow.v3.json | 347
-rw-r--r--  googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json | 347
-rw-r--r--  googleapiclient/discovery_cache/documents/displayvideo.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/dlp.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/dns.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/dns.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/docs.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/documentai.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/documentai.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/documentai.v1beta3.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/domainsrdap.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json | 60
-rw-r--r--  googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/drive.v2.json | 8
-rw-r--r--  googleapiclient/discovery_cache/documents/drive.v3.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/driveactivity.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/essentialcontacts.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/fcm.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebase.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebasehosting.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebaseml.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json | 18
-rw-r--r--  googleapiclient/discovery_cache/documents/firestore.v1.json | 152
-rw-r--r--  googleapiclient/discovery_cache/documents/firestore.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/firestore.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/fitness.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/games.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/gamesManagement.v1management.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/gameservices.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/gkehub.v1.json | 68
-rw-r--r--  googleapiclient/discovery_cache/documents/gkehub.v1alpha.json | 68
-rw-r--r--  googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/gkehub.v1beta.json | 68
-rw-r--r--  googleapiclient/discovery_cache/documents/gkehub.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/gmail.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/groupsmigration.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/groupssettings.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/healthcare.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/healthcare.v1beta1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/homegraph.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/iam.v1.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/iamcredentials.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/iap.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/iap.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/ideahub.v1alpha.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/ideahub.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/indexing.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/jobs.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/jobs.v4.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/keep.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/kgsearch.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/language.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/language.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/language.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/libraryagent.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/licensing.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/localservices.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/manufacturers.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/memcache.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/memcache.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/metastore.v1alpha.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/metastore.v1beta.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/monitoring.v1.json | 58
-rw-r--r--  googleapiclient/discovery_cache/documents/monitoring.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/ondemandscanning.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/orgpolicy.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/osconfig.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/osconfig.v1alpha.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/osconfig.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/oslogin.v1.json | 12
-rw-r--r--  googleapiclient/discovery_cache/documents/oslogin.v1alpha.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/oslogin.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/pagespeedonline.v5.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json | 6
-rw-r--r--  googleapiclient/discovery_cache/documents/people.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/playablelocations.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/playcustomapp.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/policyanalyzer.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/policysimulator.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/pubsub.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/pubsub.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/pubsublite.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/realtimebidding.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/recommender.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/recommender.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/reseller.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/resourcesettings.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/retail.v2.json | 16
-rw-r--r--  googleapiclient/discovery_cache/documents/retail.v2alpha.json | 16
-rw-r--r--  googleapiclient/discovery_cache/documents/retail.v2beta.json | 16
-rw-r--r--  googleapiclient/discovery_cache/documents/run.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/run.v1alpha1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/runtimeconfig.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/safebrowsing.v4.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/secretmanager.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/securitycenter.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/servicecontrol.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/servicecontrol.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/servicedirectory.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/servicemanagement.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/servicenetworking.v1.json | 10
-rw-r--r--  googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/serviceusage.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/sheets.v4.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/storage.v1.json | 4
-rw-r--r--  googleapiclient/discovery_cache/documents/streetviewpublish.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/sts.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/sts.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/tagmanager.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/tagmanager.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/tasks.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/testing.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/texttospeech.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/toolresults.v1beta3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/vectortile.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/webrisk.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/websecurityscanner.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/workflowexecutions.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/workflows.v1.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/workflows.v1beta.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/youtube.v3.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json | 2
-rw-r--r--  googleapiclient/discovery_cache/documents/youtubereporting.v1.json | 2
329 files changed, 5826 insertions, 1000 deletions
diff --git a/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html b/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html
index ce7c6f4d7..dbc8460c7 100644
--- a/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html
+++ b/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html
@@ -119,7 +119,7 @@ Args:
{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified.
&quot;clientAccountId&quot;: &quot;A String&quot;, # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations.
- &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.
+ &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed.
&quot;entityId&quot;: &quot;A String&quot;, # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file.
&quot;entityName&quot;: &quot;A String&quot;, # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations.
&quot;entityType&quot;: &quot;A String&quot;, # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.
@@ -139,7 +139,7 @@ Returns:
{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified.
&quot;clientAccountId&quot;: &quot;A String&quot;, # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations.
- &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.
+ &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed.
&quot;entityId&quot;: &quot;A String&quot;, # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file.
&quot;entityName&quot;: &quot;A String&quot;, # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations.
&quot;entityType&quot;: &quot;A String&quot;, # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.
@@ -167,7 +167,7 @@ Returns:
{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified.
&quot;clientAccountId&quot;: &quot;A String&quot;, # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations.
- &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.
+ &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed.
&quot;entityId&quot;: &quot;A String&quot;, # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file.
&quot;entityName&quot;: &quot;A String&quot;, # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations.
&quot;entityType&quot;: &quot;A String&quot;, # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.
@@ -199,7 +199,7 @@ Returns:
&quot;clients&quot;: [ # The returned list of clients.
{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified.
&quot;clientAccountId&quot;: &quot;A String&quot;, # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations.
- &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.
+ &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed.
&quot;entityId&quot;: &quot;A String&quot;, # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file.
&quot;entityName&quot;: &quot;A String&quot;, # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations.
&quot;entityType&quot;: &quot;A String&quot;, # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.
@@ -239,7 +239,7 @@ Args:
{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified.
&quot;clientAccountId&quot;: &quot;A String&quot;, # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations.
- &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.
+ &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed.
&quot;entityId&quot;: &quot;A String&quot;, # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file.
&quot;entityName&quot;: &quot;A String&quot;, # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations.
&quot;entityType&quot;: &quot;A String&quot;, # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.
@@ -259,7 +259,7 @@ Returns:
{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified.
&quot;clientAccountId&quot;: &quot;A String&quot;, # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations.
- &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.
+ &quot;clientName&quot;: &quot;A String&quot;, # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed.
&quot;entityId&quot;: &quot;A String&quot;, # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file.
&quot;entityName&quot;: &quot;A String&quot;, # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations.
&quot;entityType&quot;: &quot;A String&quot;, # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.
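The only substantive change in this file is the documented 255-character cap on `clientName`. A minimal sketch of creating a client buyer under that constraint (the account ID, names, and role below are placeholder values, not taken from this commit):

```python
from googleapiclient.discovery import build

adx = build("adexchangebuyer2", "v2beta1")

client_body = {
    # clientName is now documented to allow at most 255 characters; truncate
    # defensively rather than relying on server-side validation errors.
    "clientName": "Example Agency Client"[:255],
    "entityType": "AGENCY",
    "role": "CLIENT_DEAL_VIEWER",
    "visibleToSeller": False,
}

created = adx.accounts().clients().create(
    accountId="123456789",  # placeholder sponsor buyer account ID
    body=client_body,
).execute()
print(created.get("clientAccountId"))
```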
diff --git a/docs/dyn/analyticsadmin_v1alpha.accounts.html b/docs/dyn/analyticsadmin_v1alpha.accounts.html
index 049d91440..eb32f40aa 100644
--- a/docs/dyn/analyticsadmin_v1alpha.accounts.html
+++ b/docs/dyn/analyticsadmin_v1alpha.accounts.html
@@ -439,6 +439,7 @@ Returns:
&quot;secretValue&quot;: &quot;A String&quot;, # Output only. The measurement protocol secret value. Pass this value to the api_secret field of the Measurement Protocol API when sending hits to this secret&#x27;s parent property.
},
&quot;property&quot;: { # A resource message representing a Google Analytics GA4 property. # A snapshot of a Property resource in change history.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
@@ -561,6 +562,7 @@ Returns:
&quot;secretValue&quot;: &quot;A String&quot;, # Output only. The measurement protocol secret value. Pass this value to the api_secret field of the Measurement Protocol API when sending hits to this secret&#x27;s parent property.
},
&quot;property&quot;: { # A resource message representing a Google Analytics GA4 property. # A snapshot of a Property resource in change history.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
diff --git a/docs/dyn/analyticsadmin_v1alpha.properties.html b/docs/dyn/analyticsadmin_v1alpha.properties.html
index 2415b3919..be472f875 100644
--- a/docs/dyn/analyticsadmin_v1alpha.properties.html
+++ b/docs/dyn/analyticsadmin_v1alpha.properties.html
@@ -177,6 +177,7 @@ Args:
The object takes the form of:
{ # A resource message representing a Google Analytics GA4 property.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
@@ -199,6 +200,7 @@ Returns:
An object of the form:
{ # A resource message representing a Google Analytics GA4 property.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
@@ -228,6 +230,7 @@ Returns:
An object of the form:
{ # A resource message representing a Google Analytics GA4 property.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
@@ -257,6 +260,7 @@ Returns:
An object of the form:
{ # A resource message representing a Google Analytics GA4 property.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
@@ -334,6 +338,7 @@ Returns:
&quot;nextPageToken&quot;: &quot;A String&quot;, # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
&quot;properties&quot;: [ # Results that matched the filter criteria and were accessible to the caller.
{ # A resource message representing a Google Analytics GA4 property.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
@@ -374,6 +379,7 @@ Args:
The object takes the form of:
{ # A resource message representing a Google Analytics GA4 property.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
@@ -397,6 +403,7 @@ Returns:
An object of the form:
{ # A resource message representing a Google Analytics GA4 property.
+ &quot;account&quot;: &quot;A String&quot;, # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: &quot;accounts/123&quot;
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Time when the entity was originally created.
&quot;currencyCode&quot;: &quot;A String&quot;, # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: &quot;USD&quot;, &quot;EUR&quot;, &quot;JPY&quot;
&quot;deleteTime&quot;: &quot;A String&quot;, # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can.
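Every Property snippet above gains the immutable `account` field (`accounts/{account_id}`), which names the property's parent account. A minimal sketch of supplying it when creating a GA4 property (all field values are placeholders):

```python
from googleapiclient.discovery import build

admin = build("analyticsadmin", "v1alpha")

prop_body = {
    "account": "accounts/123",  # immutable parent account, new in this update
    "displayName": "Example GA4 Property",
    "timeZone": "America/Los_Angeles",
    "currencyCode": "USD",
}

prop = admin.properties().create(body=prop_body).execute()
print(prop.get("name"))  # e.g. "properties/456"
```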
diff --git a/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html b/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html
index e6a737c86..84ea33cfc 100644
--- a/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html
+++ b/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html
@@ -135,7 +135,7 @@ Args:
},
&quot;lastModifiedAt&quot;: &quot;A String&quot;, # Output only. Time the rate plan was last modified in milliseconds since epoch.
&quot;name&quot;: &quot;A String&quot;, # Output only. Name of the rate plan.
- &quot;paymentFundingModel&quot;: &quot;A String&quot;, # Flag that specifies the billing account type, prepaid or postpaid.
+ &quot;paymentFundingModel&quot;: &quot;A String&quot;, # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.
&quot;revenueShareRates&quot;: [ # Details of the revenue sharing model.
{ # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range.
&quot;end&quot;: &quot;A String&quot;, # Ending value of the range. Set to 0 or `null` for the last range of values.
@@ -189,7 +189,7 @@ Returns:
},
&quot;lastModifiedAt&quot;: &quot;A String&quot;, # Output only. Time the rate plan was last modified in milliseconds since epoch.
&quot;name&quot;: &quot;A String&quot;, # Output only. Name of the rate plan.
- &quot;paymentFundingModel&quot;: &quot;A String&quot;, # Flag that specifies the billing account type, prepaid or postpaid.
+ &quot;paymentFundingModel&quot;: &quot;A String&quot;, # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.
&quot;revenueShareRates&quot;: [ # Details of the revenue sharing model.
{ # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range.
&quot;end&quot;: &quot;A String&quot;, # Ending value of the range. Set to 0 or `null` for the last range of values.
@@ -250,7 +250,7 @@ Returns:
},
&quot;lastModifiedAt&quot;: &quot;A String&quot;, # Output only. Time the rate plan was last modified in milliseconds since epoch.
&quot;name&quot;: &quot;A String&quot;, # Output only. Name of the rate plan.
- &quot;paymentFundingModel&quot;: &quot;A String&quot;, # Flag that specifies the billing account type, prepaid or postpaid.
+ &quot;paymentFundingModel&quot;: &quot;A String&quot;, # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.
&quot;revenueShareRates&quot;: [ # Details of the revenue sharing model.
{ # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range.
&quot;end&quot;: &quot;A String&quot;, # Ending value of the range. Set to 0 or `null` for the last range of values.
@@ -311,7 +311,7 @@ Returns:
},
&quot;lastModifiedAt&quot;: &quot;A String&quot;, # Output only. Time the rate plan was last modified in milliseconds since epoch.
&quot;name&quot;: &quot;A String&quot;, # Output only. Name of the rate plan.
- &quot;paymentFundingModel&quot;: &quot;A String&quot;, # Flag that specifies the billing account type, prepaid or postpaid.
+ &quot;paymentFundingModel&quot;: &quot;A String&quot;, # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.
&quot;revenueShareRates&quot;: [ # Details of the revenue sharing model.
{ # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range.
&quot;end&quot;: &quot;A String&quot;, # Ending value of the range. Set to 0 or `null` for the last range of values.
@@ -384,7 +384,7 @@ Returns:
},
&quot;lastModifiedAt&quot;: &quot;A String&quot;, # Output only. Time the rate plan was last modified in milliseconds since epoch.
&quot;name&quot;: &quot;A String&quot;, # Output only. Name of the rate plan.
- &quot;paymentFundingModel&quot;: &quot;A String&quot;, # Flag that specifies the billing account type, prepaid or postpaid.
+ &quot;paymentFundingModel&quot;: &quot;A String&quot;, # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.
&quot;revenueShareRates&quot;: [ # Details of the revenue sharing model.
{ # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range.
&quot;end&quot;: &quot;A String&quot;, # Ending value of the range. Set to 0 or `null` for the last range of values.
@@ -442,7 +442,7 @@ Args:
},
&quot;lastModifiedAt&quot;: &quot;A String&quot;, # Output only. Time the rate plan was last modified in milliseconds since epoch.
&quot;name&quot;: &quot;A String&quot;, # Output only. Name of the rate plan.
- &quot;paymentFundingModel&quot;: &quot;A String&quot;, # Flag that specifies the billing account type, prepaid or postpaid.
+ &quot;paymentFundingModel&quot;: &quot;A String&quot;, # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.
&quot;revenueShareRates&quot;: [ # Details of the revenue sharing model.
{ # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range.
&quot;end&quot;: &quot;A String&quot;, # Ending value of the range. Set to 0 or `null` for the last range of values.
@@ -496,7 +496,7 @@ Returns:
},
&quot;lastModifiedAt&quot;: &quot;A String&quot;, # Output only. Time the rate plan was last modified in milliseconds since epoch.
&quot;name&quot;: &quot;A String&quot;, # Output only. Name of the rate plan.
- &quot;paymentFundingModel&quot;: &quot;A String&quot;, # Flag that specifies the billing account type, prepaid or postpaid.
+ &quot;paymentFundingModel&quot;: &quot;A String&quot;, # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.
&quot;revenueShareRates&quot;: [ # Details of the revenue sharing model.
{ # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range.
&quot;end&quot;: &quot;A String&quot;, # Ending value of the range. Set to 0 or `null` for the last range of values.
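Since `paymentFundingModel` is deprecated above in favor of `billingType` on the developer's `DeveloperMonetizationConfig`, here is a hedged sketch of reading the replacement field; the organization and developer names are placeholders, and the `getMonetizationConfig` method is assumed from the Apigee v1 monetization surface rather than from this diff:

```python
from googleapiclient.discovery import build

apigee = build("apigee", "v1")

config = apigee.organizations().developers().getMonetizationConfig(
    name="organizations/my-org/developers/dev@example.com/monetizationConfig"
).execute()

# billingType ("PREPAID" or "POSTPAID") supersedes the rate plan's deprecated
# paymentFundingModel flag.
print(config.get("billingType"))
```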
diff --git a/docs/dyn/apigee_v1.organizations.html b/docs/dyn/apigee_v1.organizations.html
index c755cc195..4edbf7395 100644
--- a/docs/dyn/apigee_v1.organizations.html
+++ b/docs/dyn/apigee_v1.organizations.html
@@ -230,7 +230,7 @@ Args:
&quot;createdAt&quot;: &quot;A String&quot;, # Output only. Time that the Apigee organization was created in milliseconds since epoch.
&quot;customerName&quot;: &quot;A String&quot;, # Not used by Apigee.
&quot;description&quot;: &quot;A String&quot;, # Description of the Apigee organization.
- &quot;displayName&quot;: &quot;A String&quot;,
+ &quot;displayName&quot;: &quot;A String&quot;, # Display name for the Apigee organization. Unused, but reserved for future use.
&quot;environments&quot;: [ # Output only. List of environments in the Apigee organization.
&quot;A String&quot;,
],
@@ -351,7 +351,7 @@ Returns:
&quot;createdAt&quot;: &quot;A String&quot;, # Output only. Time that the Apigee organization was created in milliseconds since epoch.
&quot;customerName&quot;: &quot;A String&quot;, # Not used by Apigee.
&quot;description&quot;: &quot;A String&quot;, # Description of the Apigee organization.
- &quot;displayName&quot;: &quot;A String&quot;,
+ &quot;displayName&quot;: &quot;A String&quot;, # Display name for the Apigee organization. Unused, but reserved for future use.
&quot;environments&quot;: [ # Output only. List of environments in the Apigee organization.
&quot;A String&quot;,
],
@@ -606,7 +606,7 @@ Args:
&quot;createdAt&quot;: &quot;A String&quot;, # Output only. Time that the Apigee organization was created in milliseconds since epoch.
&quot;customerName&quot;: &quot;A String&quot;, # Not used by Apigee.
&quot;description&quot;: &quot;A String&quot;, # Description of the Apigee organization.
- &quot;displayName&quot;: &quot;A String&quot;,
+ &quot;displayName&quot;: &quot;A String&quot;, # Display name for the Apigee organization. Unused, but reserved for future use.
&quot;environments&quot;: [ # Output only. List of environments in the Apigee organization.
&quot;A String&quot;,
],
@@ -656,7 +656,7 @@ Returns:
&quot;createdAt&quot;: &quot;A String&quot;, # Output only. Time that the Apigee organization was created in milliseconds since epoch.
&quot;customerName&quot;: &quot;A String&quot;, # Not used by Apigee.
&quot;description&quot;: &quot;A String&quot;, # Description of the Apigee organization.
- &quot;displayName&quot;: &quot;A String&quot;,
+ &quot;displayName&quot;: &quot;A String&quot;, # Display name for the Apigee organization. Unused, but reserved for future use.
&quot;environments&quot;: [ # Output only. List of environments in the Apigee organization.
&quot;A String&quot;,
],
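Because `displayName` is documented as unused and merely reserved for future use, callers should not depend on it being populated. A short sketch, with a placeholder organization ID:

    from googleapiclient.discovery import build

    apigee = build('apigee', 'v1')

    org = apigee.organizations().get(name='organizations/my-org').execute()

    # displayName is reserved but currently unused, so fall back to the
    # resource name when it is empty.
    print(org.get('displayName') or org['name'])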
diff --git a/docs/dyn/appengine_v1.apps.services.html b/docs/dyn/appengine_v1.apps.services.html
index 8a2126f82..2f63ab005 100644
--- a/docs/dyn/appengine_v1.apps.services.html
+++ b/docs/dyn/appengine_v1.apps.services.html
@@ -156,6 +156,9 @@ Returns:
{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.
&quot;id&quot;: &quot;A String&quot;, # Relative name of the service within the application. Example: default.@OutputOnly
+ &quot;labels&quot;: { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., &quot;env=prod&quot;, &quot;env=qa&quot;). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.
+ &quot;a_key&quot;: &quot;A String&quot;,
+ },
&quot;name&quot;: &quot;A String&quot;, # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly
&quot;networkSettings&quot;: { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions.
&quot;ingressTrafficAllowed&quot;: &quot;A String&quot;, # The ingress settings for version or service.
@@ -190,6 +193,9 @@ Returns:
&quot;services&quot;: [ # The services belonging to the requested application.
{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.
&quot;id&quot;: &quot;A String&quot;, # Relative name of the service within the application. Example: default.@OutputOnly
+ &quot;labels&quot;: { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., &quot;env=prod&quot;, &quot;env=qa&quot;). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.
+ &quot;a_key&quot;: &quot;A String&quot;,
+ },
&quot;name&quot;: &quot;A String&quot;, # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly
&quot;networkSettings&quot;: { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions.
&quot;ingressTrafficAllowed&quot;: &quot;A String&quot;, # The ingress settings for version or service.
@@ -231,6 +237,9 @@ Args:
{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.
&quot;id&quot;: &quot;A String&quot;, # Relative name of the service within the application. Example: default.@OutputOnly
+ &quot;labels&quot;: { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., &quot;env=prod&quot;, &quot;env=qa&quot;). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.
+ &quot;a_key&quot;: &quot;A String&quot;,
+ },
&quot;name&quot;: &quot;A String&quot;, # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly
&quot;networkSettings&quot;: { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions.
&quot;ingressTrafficAllowed&quot;: &quot;A String&quot;, # The ingress settings for version or service.
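The new `labels` map on Service is writable through the existing `patch` method. A minimal sketch, assuming placeholder app and service IDs and that only the labels field is being updated:

    from googleapiclient.discovery import build

    appengine = build('appengine', 'v1')

    # Tag the default service with environment labels (placeholder values).
    op = appengine.apps().services().patch(
        appsId='my-app',
        servicesId='default',
        updateMask='labels',
        body={'labels': {'env': 'prod', 'team': 'payments'}},
    ).execute()

    # patch returns a long-running Operation resource.
    print(op['name'])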
diff --git a/docs/dyn/appengine_v1beta.apps.services.html b/docs/dyn/appengine_v1beta.apps.services.html
index f12977b9e..0dea8a6fb 100644
--- a/docs/dyn/appengine_v1beta.apps.services.html
+++ b/docs/dyn/appengine_v1beta.apps.services.html
@@ -156,6 +156,9 @@ Returns:
{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.
&quot;id&quot;: &quot;A String&quot;, # Relative name of the service within the application. Example: default.@OutputOnly
+ &quot;labels&quot;: { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., &quot;env=prod&quot;, &quot;env=qa&quot;). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.
+ &quot;a_key&quot;: &quot;A String&quot;,
+ },
&quot;name&quot;: &quot;A String&quot;, # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly
&quot;networkSettings&quot;: { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions.
&quot;ingressTrafficAllowed&quot;: &quot;A String&quot;, # The ingress settings for version or service.
@@ -190,6 +193,9 @@ Returns:
&quot;services&quot;: [ # The services belonging to the requested application.
{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.
&quot;id&quot;: &quot;A String&quot;, # Relative name of the service within the application. Example: default.@OutputOnly
+ &quot;labels&quot;: { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., &quot;env=prod&quot;, &quot;env=qa&quot;). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.
+ &quot;a_key&quot;: &quot;A String&quot;,
+ },
&quot;name&quot;: &quot;A String&quot;, # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly
&quot;networkSettings&quot;: { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions.
&quot;ingressTrafficAllowed&quot;: &quot;A String&quot;, # The ingress settings for version or service.
@@ -231,6 +237,9 @@ Args:
{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.
&quot;id&quot;: &quot;A String&quot;, # Relative name of the service within the application. Example: default.@OutputOnly
+ &quot;labels&quot;: { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., &quot;env=prod&quot;, &quot;env=qa&quot;). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.
+ &quot;a_key&quot;: &quot;A String&quot;,
+ },
&quot;name&quot;: &quot;A String&quot;, # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly
&quot;networkSettings&quot;: { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions.
&quot;ingressTrafficAllowed&quot;: &quot;A String&quot;, # The ingress settings for version or service.
diff --git a/docs/dyn/area120tables_v1alpha1.tables.html b/docs/dyn/area120tables_v1alpha1.tables.html
index e37aa5970..19c160203 100644
--- a/docs/dyn/area120tables_v1alpha1.tables.html
+++ b/docs/dyn/area120tables_v1alpha1.tables.html
@@ -128,6 +128,7 @@ Returns:
},
&quot;multipleValuesDisallowed&quot;: True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible.
&quot;name&quot;: &quot;A String&quot;, # Column name.
+ &quot;readonly&quot;: True or False, # Optional. Indicates that values for the column cannot be set by the user.
&quot;relationshipDetails&quot;: { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship.
&quot;linkedTable&quot;: &quot;A String&quot;, # The name of the table this relationship is linked to.
},
@@ -182,6 +183,7 @@ Returns:
},
&quot;multipleValuesDisallowed&quot;: True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible.
&quot;name&quot;: &quot;A String&quot;, # Column name.
+ &quot;readonly&quot;: True or False, # Optional. Indicates that values for the column cannot be set by the user.
&quot;relationshipDetails&quot;: { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship.
&quot;linkedTable&quot;: &quot;A String&quot;, # The name of the table this relationship is linked to.
},
diff --git a/docs/dyn/area120tables_v1alpha1.workspaces.html b/docs/dyn/area120tables_v1alpha1.workspaces.html
index 180a27275..c98931743 100644
--- a/docs/dyn/area120tables_v1alpha1.workspaces.html
+++ b/docs/dyn/area120tables_v1alpha1.workspaces.html
@@ -128,6 +128,7 @@ Returns:
},
&quot;multipleValuesDisallowed&quot;: True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible.
&quot;name&quot;: &quot;A String&quot;, # Column name.
+ &quot;readonly&quot;: True or False, # Optional. Indicates that values for the column cannot be set by the user.
&quot;relationshipDetails&quot;: { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship.
&quot;linkedTable&quot;: &quot;A String&quot;, # The name of the table this relationship is linked to.
},
@@ -189,6 +190,7 @@ Returns:
},
&quot;multipleValuesDisallowed&quot;: True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible.
&quot;name&quot;: &quot;A String&quot;, # Column name.
+ &quot;readonly&quot;: True or False, # Optional. Indicates that values for the column cannot be set by the user.
&quot;relationshipDetails&quot;: { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship.
&quot;linkedTable&quot;: &quot;A String&quot;, # The name of the table this relationship is linked to.
},
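The new `readonly` flag matters when building row payloads: values for such columns cannot be set by the user. A sketch that lists tables and reports their read-only columns, assuming the `tables` and `columns` response keys shown in these docs:

    from googleapiclient.discovery import build

    area120 = build('area120tables', 'v1alpha1')

    resp = area120.tables().list().execute()
    for table in resp.get('tables', []):
        for column in table.get('columns', []):
            # Skip read-only columns when constructing row updates.
            if column.get('readonly'):
                print(table['name'], 'read-only column:', column['name'])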
diff --git a/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html b/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html
index 0fb6d0160..2e84ee529 100644
--- a/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html
+++ b/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html
@@ -111,7 +111,7 @@ Args:
The object takes the form of:
{ # A Workload object for managing highly regulated workloads of cloud customers.
- &quot;billingAccount&quot;: &quot;A String&quot;, # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
+ &quot;billingAccount&quot;: &quot;A String&quot;, # Optional. The billing account used for the resources that are direct children of the workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
&quot;complianceRegime&quot;: &quot;A String&quot;, # Required. Immutable. Compliance Regime associated with this workload.
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Immutable. The Workload creation timestamp.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The user-assigned display name of the Workload. When present, it must be between 4 and 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
@@ -204,7 +204,7 @@ Returns:
An object of the form:
{ # A Workload object for managing highly regulated workloads of cloud customers.
- &quot;billingAccount&quot;: &quot;A String&quot;, # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
+ &quot;billingAccount&quot;: &quot;A String&quot;, # Optional. The billing account used for the resources that are direct children of the workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
&quot;complianceRegime&quot;: &quot;A String&quot;, # Required. Immutable. Compliance Regime associated with this workload.
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Immutable. The Workload creation timestamp.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The user-assigned display name of the Workload. When present, it must be between 4 and 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
@@ -255,7 +255,7 @@ Returns:
&quot;nextPageToken&quot;: &quot;A String&quot;, # The next page token. Return empty if reached the last page.
&quot;workloads&quot;: [ # List of Workloads under a given parent.
{ # A Workload object for managing highly regulated workloads of cloud customers.
- &quot;billingAccount&quot;: &quot;A String&quot;, # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
+ &quot;billingAccount&quot;: &quot;A String&quot;, # Optional. The billing account used for the resources that are direct children of the workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
&quot;complianceRegime&quot;: &quot;A String&quot;, # Required. Immutable. Compliance Regime associated with this workload.
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Immutable. The Workload creation timestamp.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The user-assigned display name of the Workload. When present, it must be between 4 and 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
@@ -311,7 +311,7 @@ Args:
The object takes the form of:
{ # A Workload object for managing highly regulated workloads of cloud customers.
- &quot;billingAccount&quot;: &quot;A String&quot;, # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
+ &quot;billingAccount&quot;: &quot;A String&quot;, # Optional. The billing account used for the resources that are direct children of the workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
&quot;complianceRegime&quot;: &quot;A String&quot;, # Required. Immutable. Compliance Regime associated with this workload.
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Immutable. The Workload creation timestamp.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The user-assigned display name of the Workload. When present, it must be between 4 and 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
@@ -350,7 +350,7 @@ Returns:
An object of the form:
{ # A Workload object for managing highly regulated workloads of cloud customers.
- &quot;billingAccount&quot;: &quot;A String&quot;, # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
+ &quot;billingAccount&quot;: &quot;A String&quot;, # Optional. The billing account used for the resources that are direct children of the workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
&quot;complianceRegime&quot;: &quot;A String&quot;, # Required. Immutable. Compliance Regime associated with this workload.
&quot;createTime&quot;: &quot;A String&quot;, # Output only. Immutable. The Workload creation timestamp.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The user-assigned display name of the Workload. When present, it must be between 4 and 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
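With `billingAccount` relaxed from Required to Optional, a workload can now be created without one in the request body. A sketch with placeholder parent and compliance values:

    from googleapiclient.discovery import build

    aw = build('assuredworkloads', 'v1')

    # billingAccount is omitted here, which the API now permits; it can
    # be associated with the workload's resources later.
    op = aw.organizations().locations().workloads().create(
        parent='organizations/123/locations/us-central1',
        body={
            'displayName': 'My Workload',
            'complianceRegime': 'FEDRAMP_MODERATE',  # placeholder regime
        },
    ).execute()
    print(op['name'])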
diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html
index 69ae2ad53..b9f6b6fcd 100644
--- a/docs/dyn/bigquery_v2.jobs.html
+++ b/docs/dyn/bigquery_v2.jobs.html
@@ -82,7 +82,7 @@ h1, h2, h3 {
<p class="firstline">Close httplib2 connections.</p>
<p class="toc_element">
<code><a href="#delete">delete(projectId, jobId, location=None)</a></code></p>
-<p class="firstline">Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview.</p>
+<p class="firstline">Requests the deletion of the metadata of a job. This call returns when the job's metadata is deleted.</p>
<p class="toc_element">
<code><a href="#get">get(projectId, jobId, location=None)</a></code></p>
<p class="firstline">Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.</p>
@@ -728,11 +728,11 @@ Returns:
<div class="method">
<code class="details" id="delete">delete(projectId, jobId, location=None)</code>
- <pre>Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview.
+ <pre>Requests the deletion of the metadata of a job. This call returns when the job&#x27;s metadata is deleted.
Args:
- projectId: string, Required. Project ID of the job to be deleted. (required)
- jobId: string, Required. Job ID of the job to be deleted. If this is a parent job which has child jobs, all child jobs will be deleted as well. Deletion of child jobs directly is not allowed. (required)
+ projectId: string, Required. Project ID of the job for which metadata is to be deleted. (required)
+ jobId: string, Required. Job ID of the job for which metadata is to be deleted. If this is a parent job which has child jobs, the metadata from all child jobs will be deleted as well. Direct deletion of the metadata of child jobs is not allowed. (required)
location: string, The geographic location of the job. Required. See details at: https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
</pre>
</div>
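The reworded `delete` method removes only the job's metadata, not any data the job produced. A minimal call with placeholder project and job IDs; note that `location` is required by the docs even though it is an optional parameter in the signature:

    from googleapiclient.discovery import build

    bq = build('bigquery', 'v2')

    # Deletes the job's metadata (and that of any child jobs).
    bq.jobs().delete(
        projectId='my-project',
        jobId='my-job-id',
        location='US',
    ).execute()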
diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html
index 3679ea8eb..81bd48f6a 100644
--- a/docs/dyn/bigquery_v2.models.html
+++ b/docs/dyn/bigquery_v2.models.html
@@ -387,7 +387,12 @@ Returns:
&quot;autoArima&quot;: True or False, # Whether to enable auto ARIMA or not.
&quot;autoArimaMaxOrder&quot;: &quot;A String&quot;, # The max value of non-seasonal p and q.
&quot;batchSize&quot;: &quot;A String&quot;, # Batch size for dnn models.
+ &quot;boosterType&quot;: &quot;A String&quot;, # Booster type for boosted tree models.
&quot;cleanSpikesAndDips&quot;: True or False, # If true, clean spikes and dips in the input time series.
+ &quot;colsampleBylevel&quot;: 3.14, # Subsample ratio of columns for each level for boosted tree models.
+ &quot;colsampleBynode&quot;: 3.14, # Subsample ratio of columns for each node (split) for boosted tree models.
+ &quot;colsampleBytree&quot;: 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models.
+ &quot;dartNormalizeType&quot;: &quot;A String&quot;, # Type of normalization algorithm for boosted tree models using dart booster.
&quot;dataFrequency&quot;: &quot;A String&quot;, # The data frequency of a time series.
&quot;dataSplitColumn&quot;: &quot;A String&quot;, # The column to split data with. This column won&#x27;t be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
&quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
@@ -422,6 +427,7 @@ Returns:
&quot;maxTreeDepth&quot;: &quot;A String&quot;, # Maximum depth of a tree for boosted tree models.
&quot;minRelativeProgress&quot;: 3.14, # When early_stop is true, stops training when accuracy improvement is less than &#x27;min_relative_progress&#x27;. Used only for iterative training algorithms.
&quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+ &quot;minTreeChildWeight&quot;: &quot;A String&quot;, # Minimum sum of instance weight needed in a child for boosted tree models.
&quot;modelUri&quot;: &quot;A String&quot;, # Google Cloud Storage URI from which the model was imported. Only applicable for imported models.
&quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order.
&quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
@@ -430,6 +436,7 @@ Returns:
},
&quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
&quot;numFactors&quot;: &quot;A String&quot;, # Num factors specified for matrix factorization models.
+ &quot;numParallelTree&quot;: &quot;A String&quot;, # Number of parallel trees constructed during each iteration for boosted tree models.
&quot;optimizationStrategy&quot;: &quot;A String&quot;, # Optimization strategy for training linear regression models.
&quot;preserveInputStructs&quot;: True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.
&quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
@@ -439,6 +446,7 @@ Returns:
&quot;A String&quot;,
],
&quot;timeSeriesTimestampColumn&quot;: &quot;A String&quot;, # Column to be designated as time series timestamp for ARIMA model.
+ &quot;treeMethod&quot;: &quot;A String&quot;, # Tree construction algorithm for boosted tree models.
&quot;userColumn&quot;: &quot;A String&quot;, # User column specified for matrix factorization models.
&quot;walsAlpha&quot;: 3.14, # Hyperparameter for matrix factorization when implicit feedback type is specified.
&quot;warmStart&quot;: True or False, # Whether to train a model from the last checkpoint.
@@ -729,7 +737,12 @@ Returns:
&quot;autoArima&quot;: True or False, # Whether to enable auto ARIMA or not.
&quot;autoArimaMaxOrder&quot;: &quot;A String&quot;, # The max value of non-seasonal p and q.
&quot;batchSize&quot;: &quot;A String&quot;, # Batch size for dnn models.
+ &quot;boosterType&quot;: &quot;A String&quot;, # Booster type for boosted tree models.
&quot;cleanSpikesAndDips&quot;: True or False, # If true, clean spikes and dips in the input time series.
+ &quot;colsampleBylevel&quot;: 3.14, # Subsample ratio of columns for each level for boosted tree models.
+ &quot;colsampleBynode&quot;: 3.14, # Subsample ratio of columns for each node (split) for boosted tree models.
+ &quot;colsampleBytree&quot;: 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models.
+ &quot;dartNormalizeType&quot;: &quot;A String&quot;, # Type of normalization algorithm for boosted tree models using dart booster.
&quot;dataFrequency&quot;: &quot;A String&quot;, # The data frequency of a time series.
&quot;dataSplitColumn&quot;: &quot;A String&quot;, # The column to split data with. This column won&#x27;t be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
&quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
@@ -764,6 +777,7 @@ Returns:
&quot;maxTreeDepth&quot;: &quot;A String&quot;, # Maximum depth of a tree for boosted tree models.
&quot;minRelativeProgress&quot;: 3.14, # When early_stop is true, stops training when accuracy improvement is less than &#x27;min_relative_progress&#x27;. Used only for iterative training algorithms.
&quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+ &quot;minTreeChildWeight&quot;: &quot;A String&quot;, # Minimum sum of instance weight needed in a child for boosted tree models.
&quot;modelUri&quot;: &quot;A String&quot;, # Google Cloud Storage URI from which the model was imported. Only applicable for imported models.
&quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order.
&quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
@@ -772,6 +786,7 @@ Returns:
},
&quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
&quot;numFactors&quot;: &quot;A String&quot;, # Num factors specified for matrix factorization models.
+ &quot;numParallelTree&quot;: &quot;A String&quot;, # Number of parallel trees constructed during each iteration for boosted tree models.
&quot;optimizationStrategy&quot;: &quot;A String&quot;, # Optimization strategy for training linear regression models.
&quot;preserveInputStructs&quot;: True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.
&quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
@@ -781,6 +796,7 @@ Returns:
&quot;A String&quot;,
],
&quot;timeSeriesTimestampColumn&quot;: &quot;A String&quot;, # Column to be designated as time series timestamp for ARIMA model.
+ &quot;treeMethod&quot;: &quot;A String&quot;, # Tree construction algorithm for boosted tree models.
&quot;userColumn&quot;: &quot;A String&quot;, # User column specified for matrix factorization models.
&quot;walsAlpha&quot;: 3.14, # Hyperparameter for matrix factorization when implicit feedback type is specified.
&quot;warmStart&quot;: True or False, # Whether to train a model from the last checkpoint.
@@ -1084,7 +1100,12 @@ Args:
&quot;autoArima&quot;: True or False, # Whether to enable auto ARIMA or not.
&quot;autoArimaMaxOrder&quot;: &quot;A String&quot;, # The max value of non-seasonal p and q.
&quot;batchSize&quot;: &quot;A String&quot;, # Batch size for dnn models.
+ &quot;boosterType&quot;: &quot;A String&quot;, # Booster type for boosted tree models.
&quot;cleanSpikesAndDips&quot;: True or False, # If true, clean spikes and dips in the input time series.
+ &quot;colsampleBylevel&quot;: 3.14, # Subsample ratio of columns for each level for boosted tree models.
+ &quot;colsampleBynode&quot;: 3.14, # Subsample ratio of columns for each node (split) for boosted tree models.
+ &quot;colsampleBytree&quot;: 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models.
+ &quot;dartNormalizeType&quot;: &quot;A String&quot;, # Type of normalization algorithm for boosted tree models using dart booster.
&quot;dataFrequency&quot;: &quot;A String&quot;, # The data frequency of a time series.
&quot;dataSplitColumn&quot;: &quot;A String&quot;, # The column to split data with. This column won&#x27;t be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
&quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
@@ -1119,6 +1140,7 @@ Args:
&quot;maxTreeDepth&quot;: &quot;A String&quot;, # Maximum depth of a tree for boosted tree models.
&quot;minRelativeProgress&quot;: 3.14, # When early_stop is true, stops training when accuracy improvement is less than &#x27;min_relative_progress&#x27;. Used only for iterative training algorithms.
&quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+ &quot;minTreeChildWeight&quot;: &quot;A String&quot;, # Minimum sum of instance weight needed in a child for boosted tree models.
&quot;modelUri&quot;: &quot;A String&quot;, # Google Cloud Storage URI from which the model was imported. Only applicable for imported models.
&quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order.
&quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
@@ -1127,6 +1149,7 @@ Args:
},
&quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
&quot;numFactors&quot;: &quot;A String&quot;, # Num factors specified for matrix factorization models.
+ &quot;numParallelTree&quot;: &quot;A String&quot;, # Number of parallel trees constructed during each iteration for boosted tree models.
&quot;optimizationStrategy&quot;: &quot;A String&quot;, # Optimization strategy for training linear regression models.
&quot;preserveInputStructs&quot;: True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.
&quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
@@ -1136,6 +1159,7 @@ Args:
&quot;A String&quot;,
],
&quot;timeSeriesTimestampColumn&quot;: &quot;A String&quot;, # Column to be designated as time series timestamp for ARIMA model.
+ &quot;treeMethod&quot;: &quot;A String&quot;, # Tree construction algorithm for boosted tree models.
&quot;userColumn&quot;: &quot;A String&quot;, # User column specified for matrix factorization models.
&quot;walsAlpha&quot;: 3.14, # Hyperparameter for matrix factorization when implicit feedback type is specified.
&quot;warmStart&quot;: True or False, # Whether to train a model from the last checkpoint.
@@ -1414,7 +1438,12 @@ Returns:
&quot;autoArima&quot;: True or False, # Whether to enable auto ARIMA or not.
&quot;autoArimaMaxOrder&quot;: &quot;A String&quot;, # The max value of non-seasonal p and q.
&quot;batchSize&quot;: &quot;A String&quot;, # Batch size for dnn models.
+ &quot;boosterType&quot;: &quot;A String&quot;, # Booster type for boosted tree models.
&quot;cleanSpikesAndDips&quot;: True or False, # If true, clean spikes and dips in the input time series.
+ &quot;colsampleBylevel&quot;: 3.14, # Subsample ratio of columns for each level for boosted tree models.
+ &quot;colsampleBynode&quot;: 3.14, # Subsample ratio of columns for each node (split) for boosted tree models.
+ &quot;colsampleBytree&quot;: 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models.
+ &quot;dartNormalizeType&quot;: &quot;A String&quot;, # Type of normalization algorithm for boosted tree models using dart booster.
&quot;dataFrequency&quot;: &quot;A String&quot;, # The data frequency of a time series.
&quot;dataSplitColumn&quot;: &quot;A String&quot;, # The column to split data with. This column won&#x27;t be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
&quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
@@ -1449,6 +1478,7 @@ Returns:
&quot;maxTreeDepth&quot;: &quot;A String&quot;, # Maximum depth of a tree for boosted tree models.
&quot;minRelativeProgress&quot;: 3.14, # When early_stop is true, stops training when accuracy improvement is less than &#x27;min_relative_progress&#x27;. Used only for iterative training algorithms.
&quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+ &quot;minTreeChildWeight&quot;: &quot;A String&quot;, # Minimum sum of instance weight needed in a child for boosted tree models.
&quot;modelUri&quot;: &quot;A String&quot;, # Google Cloud Storage URI from which the model was imported. Only applicable for imported models.
&quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order.
&quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
@@ -1457,6 +1487,7 @@ Returns:
},
&quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
&quot;numFactors&quot;: &quot;A String&quot;, # Num factors specified for matrix factorization models.
+ &quot;numParallelTree&quot;: &quot;A String&quot;, # Number of parallel trees constructed during each iteration for boosted tree models.
&quot;optimizationStrategy&quot;: &quot;A String&quot;, # Optimization strategy for training linear regression models.
&quot;preserveInputStructs&quot;: True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.
&quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
@@ -1466,6 +1497,7 @@ Returns:
&quot;A String&quot;,
],
&quot;timeSeriesTimestampColumn&quot;: &quot;A String&quot;, # Column to be designated as time series timestamp for ARIMA model.
+ &quot;treeMethod&quot;: &quot;A String&quot;, # Tree construction algorithm for boosted tree models.
&quot;userColumn&quot;: &quot;A String&quot;, # User column specified for matrix factorization models.
&quot;walsAlpha&quot;: 3.14, # Hyperparameter for matrix factorization when implicit feedback type is specified.
&quot;warmStart&quot;: True or False, # Whether to train a model from the last checkpoint.
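The boosted-tree hyperparameters added above surface in a model's training runs. A sketch that reads them back, assuming placeholder identifiers and the `trainingRuns`/`trainingOptions` layout used elsewhere in this document:

    from googleapiclient.discovery import build

    bq = build('bigquery', 'v2')

    model = bq.models().get(
        projectId='my-project',
        datasetId='my_dataset',
        modelId='my_boosted_tree_model',
    ).execute()

    # Inspect the newest training run for the new boosted-tree options.
    opts = model['trainingRuns'][-1]['trainingOptions']
    for key in ('boosterType', 'treeMethod', 'numParallelTree',
                'minTreeChildWeight', 'colsampleBytree'):
        print(key, opts.get(key))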
diff --git a/docs/dyn/clouderrorreporting_v1beta1.projects.events.html b/docs/dyn/clouderrorreporting_v1beta1.projects.events.html
index 4742aff4d..75a2e1ced 100644
--- a/docs/dyn/clouderrorreporting_v1beta1.projects.events.html
+++ b/docs/dyn/clouderrorreporting_v1beta1.projects.events.html
@@ -205,7 +205,7 @@ Args:
],
&quot;user&quot;: &quot;A String&quot;, # The user who caused or was affected by the crash. This can be a user ID, an email address, or an arbitrary token that uniquely identifies the user. When sending an error report, leave this field empty if the user was not logged in. In this case the Error Reporting system will use other data, such as remote IP address, to distinguish affected users. See `affected_users_count` in `ErrorGroupStats`.
},
- &quot;eventTime&quot;: &quot;A String&quot;, # Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system will be used.
+ &quot;eventTime&quot;: &quot;A String&quot;, # Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system is used. If provided, the time must not exceed the [logs retention period](https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, or be more than 24 hours in the future. If an invalid time is provided, then an error is returned.
&quot;message&quot;: &quot;A String&quot;, # Required. The error message. If no `context.reportLocation` is provided, the message must contain a header (typically consisting of the exception type name and an error message) and an exception stack trace in one of the supported programming languages and formats. Supported languages are Java, Python, JavaScript, Ruby, C#, PHP, and Go. Supported stack trace formats are: * **Java**: Must be the return value of [`Throwable.printStackTrace()`](https://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html#printStackTrace%28%29). * **Python**: Must be the return value of [`traceback.format_exc()`](https://docs.python.org/2/library/traceback.html#traceback.format_exc). * **JavaScript**: Must be the value of [`error.stack`](https://github.com/v8/v8/wiki/Stack-Trace-API) as returned by V8. * **Ruby**: Must contain frames returned by [`Exception.backtrace`](https://ruby-doc.org/core-2.2.0/Exception.html#method-i-backtrace). * **C#**: Must be the return value of [`Exception.ToString()`](https://msdn.microsoft.com/en-us/library/system.exception.tostring.aspx). * **PHP**: Must start with `PHP (Notice|Parse error|Fatal error|Warning)` and contain the result of [`(string)$exception`](http://php.net/manual/en/exception.tostring.php). * **Go**: Must be the return value of [`runtime.Stack()`](https://golang.org/pkg/runtime/debug/#Stack).
&quot;serviceContext&quot;: { # Describes a running service that sends errors. Its version changes over time and multiple versions can run in parallel. # Required. The service context in which this error has occurred.
&quot;resourceType&quot;: &quot;A String&quot;, # Type of the MonitoredResource. List of possible values: https://cloud.google.com/monitoring/api/resources Value is set automatically for incoming errors and must not be set when reporting errors.
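The tightened `eventTime` contract above is easy to satisfy by sending a current RFC 3339 timestamp. A sketch with a placeholder project and service context:

    from datetime import datetime, timezone

    from googleapiclient.discovery import build

    cer = build('clouderrorreporting', 'v1beta1')

    # eventTime must not predate the logs retention period or run more
    # than 24 hours into the future; "now" always satisfies both bounds.
    cer.projects().events().report(
        projectName='projects/my-project',
        body={
            'eventTime': datetime.now(timezone.utc).isoformat(),
            'serviceContext': {'service': 'checkout', 'version': '1.0'},
            # message must carry a supported stack trace format per the
            # field docs; this is a placeholder.
            'message': 'RuntimeError: example failure\n  File "app.py" ...',
        },
    ).execute()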
diff --git a/docs/dyn/cloudidentity_v1.groups.html b/docs/dyn/cloudidentity_v1.groups.html
index 8ea66c581..27402fd76 100644
--- a/docs/dyn/cloudidentity_v1.groups.html
+++ b/docs/dyn/cloudidentity_v1.groups.html
@@ -130,7 +130,7 @@ Args:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;, # Resource type for the Dynamic Group Query
},
],
@@ -241,7 +241,7 @@ Returns:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;, # Resource type for the Dynamic Group Query
},
],
@@ -293,7 +293,7 @@ Returns:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;, # Resource type for the Dynamic Group Query
},
],
@@ -368,7 +368,7 @@ Args:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;, # Resource type for the Dynamic Group Query
},
],
@@ -449,7 +449,7 @@ Returns:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;, # Resource type for the Dynamic Group Query
},
],
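The new `equalsIgnoreCase()` example translates directly into a dynamic-group create call. A sketch with placeholder group key, customer, and labels; real dynamic groups have licensing and label requirements beyond what is shown here:

    from googleapiclient.discovery import build

    ci = build('cloudidentity', 'v1')

    op = ci.groups().create(
        initialGroupConfig='EMPTY',
        body={
            'groupKey': {'id': 'john-does@example.com'},
            'parent': 'customers/C012345',
            # Label set is illustrative; consult the Cloud Identity docs
            # for the labels a dynamic group actually requires.
            'labels': {
                'cloudidentity.googleapis.com/groups.discussion_forum': '',
            },
            'dynamicGroupMetadata': {
                'queries': [{
                    'resourceType': 'USER',
                    # Case-insensitive match on the user's display name.
                    'query': "user.name.value.equalsIgnoreCase('jOhn DoE')",
                }],
            },
        },
    ).execute()
    print(op['name'])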
diff --git a/docs/dyn/cloudidentity_v1beta1.groups.html b/docs/dyn/cloudidentity_v1beta1.groups.html
index ed8234594..6cd4328ce 100644
--- a/docs/dyn/cloudidentity_v1beta1.groups.html
+++ b/docs/dyn/cloudidentity_v1beta1.groups.html
@@ -136,7 +136,7 @@ Args:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;,
},
],
@@ -260,7 +260,7 @@ Returns:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;,
},
],
@@ -325,7 +325,7 @@ Returns:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;,
},
],
@@ -413,7 +413,7 @@ Args:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;,
},
],
@@ -506,7 +506,7 @@ Returns:
&quot;dynamicGroupMetadata&quot;: { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status.
&quot;queries&quot;: [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
{ # Defines a query on a resource.
- &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)`
+ &quot;query&quot;: &quot;A String&quot;, # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department==&#x27;engineering&#x27;)` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area==&#x27;foo&#x27; &amp;&amp; loc.building_id==&#x27;bar&#x27;)` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase(&#x27;jOhn DoE&#x27;)`
&quot;resourceType&quot;: &quot;A String&quot;,
},
],
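
The v1beta1 hunks mirror the v1 change. Continuing the sketch above, a dynamic group with that body could be created through the generated client roughly as follows; the `initialGroupConfig` choice is an assumption, and credential setup (Application Default Credentials), error handling, and Operation polling are omitted.

from googleapiclient.discovery import build

# Continues the sketch above; assumes Application Default Credentials.
service = build("cloudidentity", "v1")
operation = service.groups().create(
    initialGroupConfig="WITH_INITIAL_OWNER",  # assumption: seed an initial owner
    body=dynamic_group_body,
).execute()
# groups.create returns a long-running Operation describing the new group.
print(operation)
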
diff --git a/docs/dyn/composer_v1.projects.locations.environments.html b/docs/dyn/composer_v1.projects.locations.environments.html
index 848640035..90c7288f7 100644
--- a/docs/dyn/composer_v1.projects.locations.environments.html
+++ b/docs/dyn/composer_v1.projects.locations.environments.html
@@ -114,45 +114,45 @@ Args:
&quot;config&quot;: { # Configuration information for an environment. # Configuration parameters for this environment.
&quot;airflowUri&quot;: &quot;A String&quot;, # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)).
&quot;dagGcsPrefix&quot;: &quot;A String&quot;, # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using &quot;/&quot;-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix.
- &quot;databaseConfig&quot;: { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software.
+ &quot;databaseConfig&quot;: { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
},
- &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated.
+ &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Customer-managed Encryption Key available through Google&#x27;s Key Management Service. Cannot be updated. If not specified, Google-managed key will be used.
},
&quot;gkeCluster&quot;: &quot;A String&quot;, # Output only. The Kubernetes Engine cluster used to run this environment.
&quot;nodeConfig&quot;: { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster.
- &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated.
+ &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true.
- &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
- &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field.
- &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;.
+ &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;network&quot;: &quot;A String&quot;, # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/global/networks/{networkId}&quot;. If unspecified, the &quot;default&quot; network ID in the environment&#x27;s project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`.
- &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated.
+ &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
&quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the &quot;default&quot; Compute Engine service account is used. Cannot be updated.
&quot;subnetwork&quot;: &quot;A String&quot;, # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}&quot; If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment&#x27;s project and location.
- &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated.
+ &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
},
- &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment.
+ &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateEnvironmentConfig&quot;: { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment.
&quot;cloudSqlIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`.
- &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true.
+ &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateClusterConfig&quot;: { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment.
&quot;enablePrivateEndpoint&quot;: True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied.
&quot;masterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of &#x27;172.16.0.0/23&#x27; is used.
&quot;masterIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster&#x27;s network.
},
- &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`.
- &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs.
+ &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
},
&quot;softwareConfig&quot;: { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment.
&quot;airflowConfigOverrides&quot;: { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example &quot;core-dags_are_paused_at_creation&quot;. Section names must not contain hyphens (&quot;-&quot;), opening square brackets (&quot;[&quot;), or closing square brackets (&quot;]&quot;). The property name must not be empty and must not contain an equals sign (&quot;=&quot;) or semicolon (&quot;;&quot;). Section and property names must not contain a period (&quot;.&quot;). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden.
@@ -165,13 +165,13 @@ Args:
&quot;pypiPackages&quot;: { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as &quot;numpy&quot; and values are the lowercase extras and version specifier such as &quot;==1.12.0&quot;, &quot;[devel,gcp_api]&quot;, or &quot;[devel]&gt;=1.8.2, &lt;1.9.2&quot;. To specify a package without pinning it to a version specifier, use the empty string as the value.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated.
+ &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.
&quot;schedulerCount&quot;: 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.
},
- &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance.
+ &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for the Airflow web server App Engine instance.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. The value `custom` is returned only in responses if the Airflow web server parameters were manually changed to non-standard values.
},
- &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied.
+ &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;allowedIpRanges&quot;: [ # A collection of allowed IP ranges with descriptions.
{ # Allowed IP range with user-provided description.
&quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description. It must contain at most 300 characters.
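
The recurring note in these hunks is that several `EnvironmentConfig` fields are honored only for environments in versions composer-1.*.*-airflow-*.*.*. As a rough sketch of where those fields sit in an `environments().create()` request (the project, region, and environment name are hypothetical):

from googleapiclient.discovery import build

# A hedged sketch against the Composer v1 surface documented above;
# assumes Application Default Credentials.
service = build("composer", "v1")
parent = "projects/my-project/locations/us-central1"  # hypothetical

body = {
    "name": parent + "/environments/example-env",
    "config": {
        # The fields below are composer-1 only, per the notes in this diff.
        "nodeCount": 3,
        "nodeConfig": {
            # Newer versions always use VPC-native (alias IP) clusters.
            "ipAllocationPolicy": {"useIpAliases": True},
        },
        "databaseConfig": {"machineType": "db-n1-standard-2"},
        "softwareConfig": {"pythonVersion": "3"},  # newer versions are always Python 3
    },
}
operation = service.projects().locations().environments().create(
    parent=parent, body=body
).execute()
print(operation["name"])  # google.longrunning.Operation resource name
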
@@ -272,45 +272,45 @@ Returns:
&quot;config&quot;: { # Configuration information for an environment. # Configuration parameters for this environment.
&quot;airflowUri&quot;: &quot;A String&quot;, # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)).
&quot;dagGcsPrefix&quot;: &quot;A String&quot;, # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using &quot;/&quot;-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix.
- &quot;databaseConfig&quot;: { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software.
+ &quot;databaseConfig&quot;: { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
},
- &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated.
+ &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Customer-managed Encryption Key available through Google&#x27;s Key Management Service. Cannot be updated. If not specified, Google-managed key will be used.
},
&quot;gkeCluster&quot;: &quot;A String&quot;, # Output only. The Kubernetes Engine cluster used to run this environment.
&quot;nodeConfig&quot;: { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster.
- &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated.
+ &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true.
- &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
- &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field.
- &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;.
+ &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;network&quot;: &quot;A String&quot;, # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/global/networks/{networkId}&quot;. If unspecified, the &quot;default&quot; network ID in the environment&#x27;s project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`.
- &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated.
+ &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
&quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the &quot;default&quot; Compute Engine service account is used. Cannot be updated.
&quot;subnetwork&quot;: &quot;A String&quot;, # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}&quot; If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment&#x27;s project and location.
- &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated.
+ &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
},
- &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment.
+ &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateEnvironmentConfig&quot;: { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment.
&quot;cloudSqlIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`.
- &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true.
+ &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateClusterConfig&quot;: { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment.
&quot;enablePrivateEndpoint&quot;: True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied.
&quot;masterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of &#x27;172.16.0.0/23&#x27; is used.
&quot;masterIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster&#x27;s network.
},
- &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`.
- &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs.
+ &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
},
&quot;softwareConfig&quot;: { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment.
&quot;airflowConfigOverrides&quot;: { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example &quot;core-dags_are_paused_at_creation&quot;. Section names must not contain hyphens (&quot;-&quot;), opening square brackets (&quot;[&quot;), or closing square brackets (&quot;]&quot;). The property name must not be empty and must not contain an equals sign (&quot;=&quot;) or semicolon (&quot;;&quot;). Section and property names must not contain a period (&quot;.&quot;). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden.
@@ -323,13 +323,13 @@ Returns:
&quot;pypiPackages&quot;: { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as &quot;numpy&quot; and values are the lowercase extras and version specifier such as &quot;==1.12.0&quot;, &quot;[devel,gcp_api]&quot;, or &quot;[devel]&gt;=1.8.2, &lt;1.9.2&quot;. To specify a package without pinning it to a version specifier, use the empty string as the value.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated.
+ &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.
&quot;schedulerCount&quot;: 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.
},
- &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance.
+ &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for the Airflow web server App Engine instance.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. The value `custom` is returned only in responses if the Airflow web server parameters were manually changed to non-standard values.
},
- &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied.
+ &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;allowedIpRanges&quot;: [ # A collection of allowed IP ranges with descriptions.
{ # Allowed IP range with user-provided description.
&quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description. It must contain at most 300 characters.
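
The same version caveats apply when updating an environment. Continuing the sketch above, a field such as the PyPI package list would be changed with `environments().patch()` and an update mask; the mask path and package pin here are assumptions for illustration, not values from the diff.

# Continues the Composer sketch above.
env_name = parent + "/environments/example-env"
operation = service.projects().locations().environments().patch(
    name=env_name,
    updateMask="config.softwareConfig.pypiPackages",  # assumed mask path
    body={
        "config": {
            "softwareConfig": {
                "pypiPackages": {"numpy": "==1.21.2"},  # hypothetical pin
            }
        }
    },
).execute()
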
@@ -371,45 +371,45 @@ Returns:
&quot;config&quot;: { # Configuration information for an environment. # Configuration parameters for this environment.
&quot;airflowUri&quot;: &quot;A String&quot;, # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)).
&quot;dagGcsPrefix&quot;: &quot;A String&quot;, # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using &quot;/&quot;-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix.
- &quot;databaseConfig&quot;: { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software.
+ &quot;databaseConfig&quot;: { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
},
- &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated.
+ &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Customer-managed Encryption Key available through Google&#x27;s Key Management Service. Cannot be updated. If not specified, Google-managed key will be used.
},
&quot;gkeCluster&quot;: &quot;A String&quot;, # Output only. The Kubernetes Engine cluster used to run this environment.
&quot;nodeConfig&quot;: { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster.
- &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated.
+ &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true.
- &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
- &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field.
- &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;.
+ &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;network&quot;: &quot;A String&quot;, # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/global/networks/{networkId}&quot;. If unspecified, the &quot;default&quot; network ID in the environment&#x27;s project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`.
- &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated.
+ &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
&quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the &quot;default&quot; Compute Engine service account is used. Cannot be updated.
&quot;subnetwork&quot;: &quot;A String&quot;, # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}&quot; If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment&#x27;s project and location.
- &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated.
+ &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
},
- &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment.
+ &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateEnvironmentConfig&quot;: { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment.
&quot;cloudSqlIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`.
- &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true.
+ &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateClusterConfig&quot;: { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment.
&quot;enablePrivateEndpoint&quot;: True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied.
&quot;masterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of &#x27;172.16.0.0/23&#x27; is used.
&quot;masterIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster&#x27;s network.
},
- &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`.
- &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs.
+ &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
},
&quot;softwareConfig&quot;: { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment.
&quot;airflowConfigOverrides&quot;: { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example &quot;core-dags_are_paused_at_creation&quot;. Section names must not contain hyphens (&quot;-&quot;), opening square brackets (&quot;[&quot;), or closing square brackets (&quot;]&quot;). The property name must not be empty and must not contain an equals sign (&quot;=&quot;) or semicolon (&quot;;&quot;). Section and property names must not contain a period (&quot;.&quot;). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden.
@@ -422,13 +422,13 @@ Returns:
&quot;pypiPackages&quot;: { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as &quot;numpy&quot; and values are the lowercase extras and version specifier such as &quot;==1.12.0&quot;, &quot;[devel,gcp_api]&quot;, or &quot;[devel]&gt;=1.8.2, &lt;1.9.2&quot;. To specify a package without pinning it to a version specifier, use the empty string as the value.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated.
+ &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.
&quot;schedulerCount&quot;: 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.
},
- &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance.
+ &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for the Airflow web server App Engine instance.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. The value custom is returned only in the response if Airflow web server parameters were manually changed to non-standard values.
},
- &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied.
+ &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;allowedIpRanges&quot;: [ # A collection of allowed IP ranges with descriptions.
{ # Allowed IP range with user-provided description.
&quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description. It must contain at most 300 characters.
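As a hedged illustration of the softwareConfig conventions documented in the hunk above (lowercase PyPI package names as keys, version specifiers or extras as values, and hyphen-separated section-property keys for Airflow config overrides), a request-body fragment might look like the following sketch; the package names, versions, and override value are illustrative assumptions, not values taken from this diff.

# Illustrative softwareConfig fragment for a Composer environment body.
software_config = {
    "pypiPackages": {
        "numpy": "==1.12.0",             # pin an exact version
        "scikit-learn": "",              # empty string: install without a version pin
        "apache-beam": "[gcp]>=2.31.0",  # extras plus a version specifier (assumed example)
    },
    "airflowConfigOverrides": {
        # keys are section-property, with the property name in snake_case
        "core-dags_are_paused_at_creation": "True",
    },
    "pythonVersion": "3",  # only settable in composer-1.*.*-airflow-*.*.* environments
}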
@@ -478,45 +478,45 @@ Args:
&quot;config&quot;: { # Configuration information for an environment. # Configuration parameters for this environment.
&quot;airflowUri&quot;: &quot;A String&quot;, # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)).
&quot;dagGcsPrefix&quot;: &quot;A String&quot;, # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using &quot;/&quot;-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix.
- &quot;databaseConfig&quot;: { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software.
+ &quot;databaseConfig&quot;: { # The configuration of the Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for the Cloud SQL instance used internally by the Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
},
- &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated.
+ &quot;encryptionConfig&quot;: { # The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Customer-managed Encryption Key available through Google&#x27;s Key Management Service. Cannot be updated. If not specified, Google-managed key will be used.
},
&quot;gkeCluster&quot;: &quot;A String&quot;, # Output only. The Kubernetes Engine cluster used to run this environment.
&quot;nodeConfig&quot;: { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster.
- &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated.
+ &quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true.
- &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the GKE cluster&#x27;s secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
- &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field.
- &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;.
+ &quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;machineType&quot;: &quot;A String&quot;, # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}&quot;. The `machineType` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to &quot;n1-standard-1&quot;. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;network&quot;: &quot;A String&quot;, # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/global/networks/{networkId}&quot;. If unspecified, the &quot;default&quot; network ID in the environment&#x27;s project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`.
- &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated.
+ &quot;oauthScopes&quot;: [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [&quot;https://www.googleapis.com/auth/cloud-platform&quot;]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
&quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the &quot;default&quot; Compute Engine service account is used. Cannot be updated.
&quot;subnetwork&quot;: &quot;A String&quot;, # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}&quot; If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment&#x27;s project and location.
- &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated.
+ &quot;tags&quot;: [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;A String&quot;,
],
},
- &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment.
+ &quot;nodeCount&quot;: 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateEnvironmentConfig&quot;: { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment.
&quot;cloudSqlIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`.
- &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true.
+ &quot;enablePrivateEnvironment&quot;: True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;privateClusterConfig&quot;: { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment.
&quot;enablePrivateEndpoint&quot;: True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied.
&quot;masterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of &#x27;172.16.0.0/23&#x27; is used.
&quot;masterIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster&#x27;s network.
},
- &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`.
- &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs.
+ &quot;webServerIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
+ &quot;webServerIpv4ReservedRange&quot;: &quot;A String&quot;, # Output only. The IP range reserved for the tenant project&#x27;s App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
},
&quot;softwareConfig&quot;: { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment.
&quot;airflowConfigOverrides&quot;: { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example &quot;core-dags_are_paused_at_creation&quot;. Section names must not contain hyphens (&quot;-&quot;), opening square brackets (&quot;[&quot;), or closing square brackets (&quot;]&quot;). The property name must not be empty and must not contain an equals sign (&quot;=&quot;) or semicolon (&quot;;&quot;). Section and property names must not contain a period (&quot;.&quot;). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden.
@@ -529,13 +529,13 @@ Args:
&quot;pypiPackages&quot;: { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as &quot;numpy&quot; and values are the lowercase extras and version specifier such as &quot;==1.12.0&quot;, &quot;[devel,gcp_api]&quot;, or &quot;[devel]&gt;=1.8.2, &lt;1.9.2&quot;. To specify a package without pinning it to a version specifier, use the empty string as the value.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated.
+ &quot;pythonVersion&quot;: &quot;A String&quot;, # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to &#x27;2&#x27; or &#x27;3&#x27;. If not specified, the default is &#x27;3&#x27;. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.
&quot;schedulerCount&quot;: 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.
},
- &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance.
+ &quot;webServerConfig&quot;: { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for the Airflow web server App Engine instance.
&quot;machineType&quot;: &quot;A String&quot;, # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. The value custom is returned only in the response if Airflow web server parameters were manually changed to non-standard values.
},
- &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied.
+ &quot;webServerNetworkAccessControl&quot;: { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;allowedIpRanges&quot;: [ # A collection of allowed IP ranges with descriptions.
{ # Allowed IP range with user-provided description.
&quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description. It must contain at most 300 characters.
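To make the web server access-control structure above concrete, here is a minimal sketch of a webServerNetworkAccessControl value, assuming each allowed-IP-range entry carries the range itself in a `value` field alongside the `description` shown in this hunk; that field name and the documentation CIDR ranges are assumptions for illustration.

# Illustrative webServerNetworkAccessControl fragment.
web_server_network_access_control = {
    "allowedIpRanges": [
        # Each entry pairs an IP or CIDR range with a short description (<= 300 chars).
        {"value": "203.0.113.0/24", "description": "office egress range"},
        {"value": "198.51.100.7", "description": "single trusted host"},
    ],
}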
@@ -554,7 +554,7 @@ Args:
&quot;uuid&quot;: &quot;A String&quot;, # Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created.
}
- updateMask: string, Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: &quot;config.softwareConfig.pypiPackages.scikit-learn&quot; and &quot;config.softwareConfig.pypiPackages.numpy&quot;. The included patch environment would specify the scikit-learn version as follows: { &quot;config&quot;:{ &quot;softwareConfig&quot;:{ &quot;pypiPackages&quot;:{ &quot;scikit-learn&quot;:&quot;==0.19.0&quot; } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request&#x27;s `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels &quot;label1&quot; and &quot;label2&quot; while clearing &quot;label3&quot; (assuming it already exists), one can provide the paths &quot;labels.label1&quot;, &quot;labels.label2&quot;, and &quot;labels.label3&quot; and populate the patch environment as follows: { &quot;labels&quot;:{ &quot;label1&quot;:&quot;new-label1-value&quot; &quot;label2&quot;:&quot;new-label2-value&quot; } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field&#x27;s path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path &quot;config.softwareConfig.pypiPackages&quot;, and the patch environment would be the following: { &quot;config&quot;:{ &quot;softwareConfig&quot;:{ &quot;pypiPackages&quot;:{ &quot;botocore&quot;:&quot;==1.7.14&quot; } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * `config.webServerNetworkAccessControl` * Replace the environment&#x27;s current `WebServerNetworkAccessControl`. * `config.databaseConfig` * Replace the environment&#x27;s current `DatabaseConfig`. * `config.webServerConfig` * Replace the environment&#x27;s current `WebServerConfig`. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.
+ updateMask: string, Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: &quot;config.softwareConfig.pypiPackages.scikit-learn&quot; and &quot;config.softwareConfig.pypiPackages.numpy&quot;. The included patch environment would specify the scikit-learn version as follows: { &quot;config&quot;:{ &quot;softwareConfig&quot;:{ &quot;pypiPackages&quot;:{ &quot;scikit-learn&quot;:&quot;==0.19.0&quot; } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request&#x27;s `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels &quot;label1&quot; and &quot;label2&quot; while clearing &quot;label3&quot; (assuming it already exists), one can provide the paths &quot;labels.label1&quot;, &quot;labels.label2&quot;, and &quot;labels.label3&quot; and populate the patch environment as follows: { &quot;labels&quot;:{ &quot;label1&quot;:&quot;new-label1-value&quot; &quot;label2&quot;:&quot;new-label2-value&quot; } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field&#x27;s path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path &quot;config.softwareConfig.pypiPackages&quot;, and the patch environment would be the following: { &quot;config&quot;:{ &quot;softwareConfig&quot;:{ &quot;pypiPackages&quot;:{ &quot;botocore&quot;:&quot;==1.7.14&quot; } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.webServerNetworkAccessControl` * Replace the environment&#x27;s current `WebServerNetworkAccessControl`. * `config.databaseConfig` * Replace the environment&#x27;s current `DatabaseConfig`. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.webServerConfig.machineType` * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. * `config.databaseConfig.machineType` * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
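Pulling the updateMask semantics above together, a minimal sketch of a patch call follows, assuming application-default credentials and illustrative project, location, and environment names; it pins scikit-learn and deletes numpy exactly as in the example from the parameter description.

from googleapiclient.discovery import build

# Illustrative resource name; project, location, and environment are assumptions.
name = "projects/my-project/locations/us-central1/environments/my-environment"

service = build("composer", "v1")
body = {
    "config": {
        "softwareConfig": {
            "pypiPackages": {
                "scikit-learn": "==0.19.0",
                # numpy is named in updateMask but omitted here, which deletes it
            }
        }
    }
}
operation = (
    service.projects()
    .locations()
    .environments()
    .patch(
        name=name,
        updateMask=(
            "config.softwareConfig.pypiPackages.scikit-learn,"
            "config.softwareConfig.pypiPackages.numpy"
        ),
        body=body,
    )
    .execute()
)  # returns a long-running Operation that tracks the environment update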
diff --git a/docs/dyn/composer_v1beta1.projects.locations.environments.html b/docs/dyn/composer_v1beta1.projects.locations.environments.html
index 6923ca6a8..342eba984 100644
--- a/docs/dyn/composer_v1beta1.projects.locations.environments.html
+++ b/docs/dyn/composer_v1beta1.projects.locations.environments.html
@@ -179,10 +179,10 @@ Args:
&quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;enableIpMasqAgent&quot;: True or False, # Optional. Deploys the &#x27;ip-masq-agent&#x27; daemon set in the GKE cluster and defines nonMasqueradeCIDRs equal to the pod IP range, so IP masquerading is used for all destination addresses except traffic between pods. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
&quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
&quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
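As a sketch of the either/or convention in the v1beta1 IPAllocationPolicy above (a named secondary range or a CIDR block for each of pods and services, but not both), the range name and CIDR below are illustrative assumptions:

# Illustrative ipAllocationPolicy fragment.
ip_allocation_policy = {
    "useIpAliases": True,  # composer-1.*.* only; newer versions are always VPC-native
    "clusterSecondaryRangeName": "pods-secondary-range",  # name form for pod IPs...
    "servicesIpv4CidrBlock": "10.96.0.0/20",              # ...CIDR form for service IPs
}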
@@ -368,10 +368,10 @@ Returns:
&quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;enableIpMasqAgent&quot;: True or False, # Optional. Deploys the &#x27;ip-masq-agent&#x27; daemon set in the GKE cluster and defines nonMasqueradeCIDRs equal to the pod IP range, so IP masquerading is used for all destination addresses except traffic between pods. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
&quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
&quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
@@ -498,10 +498,10 @@ Returns:
&quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;enableIpMasqAgent&quot;: True or False, # Optional. Deploys &#x27;ip-masq-agent&#x27; daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
&quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
&quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
@@ -636,10 +636,10 @@ Args:
&quot;diskSizeGb&quot;: 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
&quot;enableIpMasqAgent&quot;: True or False, # Optional. Deploys &#x27;ip-masq-agent&#x27; daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent
&quot;ipAllocationPolicy&quot;: { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster.
- &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
- &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
- &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
- &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;clusterIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.
+ &quot;clusterSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the cluster&#x27;s secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
+ &quot;servicesIpv4CidrBlock&quot;: &quot;A String&quot;, # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.
+ &quot;servicesSecondaryRangeName&quot;: &quot;A String&quot;, # Optional. The name of the services&#x27; secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.
&quot;useIpAliases&quot;: True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.
},
&quot;location&quot;: &quot;A String&quot;, # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: &quot;projects/{projectId}/zones/{zoneId}&quot;. This `location` must belong to the enclosing environment&#x27;s project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
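The ipAllocationPolicy fields above are plain dictionary keys in the request body accepted by the generated Cloud Composer client. A minimal sketch of creating a VPC-native Composer 1 environment follows; the project, region, and environment IDs are hypothetical, Application Default Credentials are assumed, and, per the field docs above, at most one of each `*_ipv4_cidr_block` / `*_secondary_range_name` pair may be set.

    from googleapiclient.discovery import build

    # Build the Cloud Composer client from its discovery document
    # (uses Application Default Credentials by default).
    composer = build('composer', 'v1')

    parent = 'projects/my-project/locations/us-central1'  # hypothetical
    body = {
        'name': parent + '/environments/my-env',  # hypothetical
        'config': {
            'nodeConfig': {
                'ipAllocationPolicy': {
                    # Composer 1 only; newer versions are always VPC-native.
                    'useIpAliases': True,
                    # Explicit RFC-1918 ranges; leave blank to let GKE choose,
                    # or name existing secondary ranges instead (not both).
                    'clusterIpv4CidrBlock': '10.0.0.0/14',
                    'servicesIpv4CidrBlock': '10.4.0.0/19',
                },
            },
        },
    }

    # create() returns a long-running Operation resource.
    operation = composer.projects().locations().environments().create(
        parent=parent, body=body).execute()
    print(operation['name'])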
diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html
index 0bfa22a96..91dab9006 100644
--- a/docs/dyn/container_v1.projects.locations.clusters.html
+++ b/docs/dyn/container_v1.projects.locations.clusters.html
@@ -520,8 +520,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1121,8 +1121,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1625,8 +1625,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -2784,8 +2784,8 @@ Args:
"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family" or "desired_node_pool_autoscaling" is specified and there is more than one node pool on the cluster.
"desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version
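The min/max node counts documented above are now stated per location, so for a regional node pool spread across three zones a `maxNodeCount` of 10 permits up to 30 nodes in total. A minimal sketch of applying such a configuration through the generated container v1 client follows; the project, location, cluster, and pool names are hypothetical and Application Default Credentials are assumed.

    from googleapiclient.discovery import build

    container = build('container', 'v1')

    # Hypothetical regional cluster; counts apply per zone (location).
    name = 'projects/my-project/locations/us-central1/clusters/my-cluster'
    body = {
        'update': {
            'desiredNodePoolId': 'default-pool',  # mandatory with >1 pool
            'desiredNodePoolAutoscaling': {
                'enabled': True,
                'minNodeCount': 1,   # must be >= 1 and <= maxNodeCount
                'maxNodeCount': 10,  # per location; quota permitting
            },
        },
    }

    # update() returns a long-running Operation resource.
    operation = container.projects().locations().clusters().update(
        name=name, body=body).execute()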
diff --git a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html
index a0cdca625..88d52a545 100644
--- a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html
+++ b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html
@@ -125,8 +125,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -396,8 +396,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -533,8 +533,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -740,8 +740,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"clusterId": "A String", # Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.
"name": "A String", # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.
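The same NodePoolAutoscaling message is also accepted directly by setAutoscaling on a single node pool, addressed with the `projects/*/locations/*/clusters/*/nodePools/*` name format documented above. A sketch under the same hypothetical names:

    from googleapiclient.discovery import build

    container = build('container', 'v1')

    # Hypothetical node pool resource name.
    pool = ('projects/my-project/locations/us-central1'
            '/clusters/my-cluster/nodePools/default-pool')
    operation = container.projects().locations().clusters().nodePools().setAutoscaling(
        name=pool,
        body={
            'autoscaling': {
                'enabled': True,
                'minNodeCount': 1,   # per location
                'maxNodeCount': 10,  # per location
            },
        },
    ).execute()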
diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html
index b25cf271c..00bbe8541 100644
--- a/docs/dyn/container_v1.projects.zones.clusters.html
+++ b/docs/dyn/container_v1.projects.zones.clusters.html
@@ -624,8 +624,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1225,8 +1225,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1773,8 +1773,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -2845,8 +2845,8 @@ Args:
"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family" or "desired_node_pool_autoscaling" is specified and there is more than one node pool on the cluster.
"desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version
diff --git a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html
index 495e46501..695a62d23 100644
--- a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html
+++ b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html
@@ -121,8 +121,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"clusterId": "A String", # Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.
"name": "A String", # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.
@@ -214,8 +214,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -485,8 +485,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -622,8 +622,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.html b/docs/dyn/container_v1beta1.projects.locations.clusters.html
index 600aa9e9f..af83aefd0 100644
--- a/docs/dyn/container_v1beta1.projects.locations.clusters.html
+++ b/docs/dyn/container_v1beta1.projects.locations.clusters.html
@@ -554,8 +554,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1208,8 +1208,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1765,8 +1765,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -2970,8 +2970,8 @@ Args:
"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family", "desired_node_pool_autoscaling", or "desired_workload_metadata_config" is specified and there is more than one node pool on the cluster.
"desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version
diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html
index 8de075642..cebc15572 100644
--- a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html
+++ b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html
@@ -125,8 +125,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -401,8 +401,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -543,8 +543,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -755,8 +755,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"clusterId": "A String", # Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.
"name": "A String", # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.
diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.html b/docs/dyn/container_v1beta1.projects.zones.clusters.html
index ba33ce76d..ca561f033 100644
--- a/docs/dyn/container_v1beta1.projects.zones.clusters.html
+++ b/docs/dyn/container_v1beta1.projects.zones.clusters.html
@@ -665,8 +665,8 @@ Args:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1319,8 +1319,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -1920,8 +1920,8 @@ Returns:
"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"conditions": [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -3031,8 +3031,8 @@ Args:
"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool.
"autoprovisioned": True or False, # Can this node pool be deleted automatically.
"enabled": True or False, # Is autoscaling enabled for this node pool.
- "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
- "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.
+ "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.
+ "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.
},
"desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family", "desired_node_pool_autoscaling", or "desired_workload_metadata_config" is specified and there is more than one node pool on the cluster.
"desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version
diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html
index 83a022e08..7335d2a60 100644
--- a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html
+++ b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html
@@ -121,8 +121,8 @@ Args:
&quot;autoscaling&quot;: { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool.
&quot;autoprovisioned&quot;: True or False, # Can this node pool be deleted automatically.
&quot;enabled&quot;: True or False, # Is autoscaling enabled for this node pool.
- &quot;maxNodeCount&quot;: 42, # Maximum number of nodes in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
- &quot;minNodeCount&quot;: 42, # Minimum number of nodes in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
+ &quot;maxNodeCount&quot;: 42, # Maximum number of nodes for one location in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
+ &quot;minNodeCount&quot;: 42, # Minimum number of nodes for one location in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
},
&quot;clusterId&quot;: &quot;A String&quot;, # Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.
&quot;name&quot;: &quot;A String&quot;, # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.
@@ -214,8 +214,8 @@ Args:
&quot;autoscaling&quot;: { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
&quot;autoprovisioned&quot;: True or False, # Can this node pool be deleted automatically.
&quot;enabled&quot;: True or False, # Is autoscaling enabled for this node pool.
- &quot;maxNodeCount&quot;: 42, # Maximum number of nodes in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
- &quot;minNodeCount&quot;: 42, # Minimum number of nodes in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
+ &quot;maxNodeCount&quot;: 42, # Maximum number of nodes for one location in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
+ &quot;minNodeCount&quot;: 42, # Minimum number of nodes for one location in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
},
&quot;conditions&quot;: [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -490,8 +490,8 @@ Returns:
&quot;autoscaling&quot;: { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
&quot;autoprovisioned&quot;: True or False, # Can this node pool be deleted automatically.
&quot;enabled&quot;: True or False, # Is autoscaling enabled for this node pool.
- &quot;maxNodeCount&quot;: 42, # Maximum number of nodes in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
- &quot;minNodeCount&quot;: 42, # Minimum number of nodes in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
+ &quot;maxNodeCount&quot;: 42, # Maximum number of nodes for one location in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
+ &quot;minNodeCount&quot;: 42, # Minimum number of nodes for one location in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
},
&quot;conditions&quot;: [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
@@ -632,8 +632,8 @@ Returns:
&quot;autoscaling&quot;: { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present.
&quot;autoprovisioned&quot;: True or False, # Can this node pool be deleted automatically.
&quot;enabled&quot;: True or False, # Is autoscaling enabled for this node pool.
- &quot;maxNodeCount&quot;: 42, # Maximum number of nodes in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
- &quot;minNodeCount&quot;: 42, # Minimum number of nodes in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
+ &quot;maxNodeCount&quot;: 42, # Maximum number of nodes for one location in the NodePool. Must be &gt;= min_node_count. There has to be enough quota to scale up the cluster.
+ &quot;minNodeCount&quot;: 42, # Minimum number of nodes for one location in the NodePool. Must be &gt;= 1 and &lt;= max_node_count.
},
&quot;conditions&quot;: [ # Which conditions caused the current node pool state.
{ # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).
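For reference, a minimal sketch of driving the clarified per-location autoscaling bounds through the dynamic container v1beta1 client; the project, location, cluster, and pool names are placeholders, and Application Default Credentials are assumed:

from googleapiclient.discovery import build

# Build the dynamic GKE client (assumes Application Default Credentials).
service = build('container', 'v1beta1')

# Placeholder resource name in the documented format
# projects/*/locations/*/clusters/*/nodePools/*.
name = ('projects/my-project/locations/us-central1/'
        'clusters/my-cluster/nodePools/default-pool')

# Both bounds apply per location: a pool spread over three zones with
# maxNodeCount=4 can reach 12 nodes in total.
body = {
    'autoscaling': {
        'enabled': True,
        'minNodeCount': 1,
        'maxNodeCount': 4,
    },
}

operation = service.projects().locations().clusters().nodePools() \
    .setAutoscaling(name=name, body=body).execute()
print(operation.get('status'))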
diff --git a/docs/dyn/content_v2_1.accounts.html b/docs/dyn/content_v2_1.accounts.html
index cdf2ba7d3..2b0203454 100644
--- a/docs/dyn/content_v2_1.accounts.html
+++ b/docs/dyn/content_v2_1.accounts.html
@@ -225,7 +225,7 @@ Args:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
@@ -316,7 +316,7 @@ Returns:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
@@ -427,7 +427,7 @@ Returns:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
@@ -496,7 +496,7 @@ Args:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
@@ -563,7 +563,7 @@ Returns:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
@@ -609,6 +609,9 @@ Args:
{
&quot;action&quot;: &quot;A String&quot;, # Action to perform for this link. The `&quot;request&quot;` action is only available to select merchants. Acceptable values are: - &quot;`approve`&quot; - &quot;`remove`&quot; - &quot;`request`&quot;
+ &quot;eCommercePlatformLinkInfo&quot;: { # Additional information required for E_COMMERCE_PLATFORM link type. # Additional information required for `eCommercePlatform` link type.
+ &quot;externalAccountId&quot;: &quot;A String&quot;, # The ID used by the third-party service provider to identify the merchant.
+ },
&quot;linkType&quot;: &quot;A String&quot;, # Type of the link between the two accounts. Acceptable values are: - &quot;`channelPartner`&quot; - &quot;`eCommercePlatform`&quot; - &quot;`paymentServiceProvider`&quot;
&quot;linkedAccountId&quot;: &quot;A String&quot;, # The ID of the linked account.
&quot;paymentServiceProviderLinkInfo&quot;: { # Additional information required for PAYMENT_SERVICE_PROVIDER link type. # Additional information required for `paymentServiceProvider` link type.
@@ -685,7 +688,7 @@ Returns:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
@@ -849,7 +852,7 @@ Args:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
@@ -916,7 +919,7 @@ Returns:
&quot;url&quot;: &quot;A String&quot;, # Customer service URL.
},
&quot;koreanBusinessRegistrationNumber&quot;: &quot;A String&quot;, # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set.
- &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.
},
&quot;cssId&quot;: &quot;A String&quot;, # ID of CSS the account belongs to.
&quot;googleMyBusinessLink&quot;: { # The GMB account which is linked or in the process of being linked with the Merchant Center account.
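A hedged sketch of the two flows the updated accounts documentation refers to: replacing a verified phone number via `Accounts.requestphoneverification`/`Accounts.verifyphonenumber`, and requesting an `eCommercePlatform` link with the new `eCommercePlatformLinkInfo` payload. Merchant, account, and external IDs are placeholders, and request-body fields beyond those shown in this diff are assumptions:

from googleapiclient.discovery import build

service = build('content', 'v2.1')  # assumes Application Default Credentials
merchant_id = '1234567'             # placeholder
account_id = '1234567'              # placeholder

# 1) Replace a verified phone number: request a code, then verify it.
request = service.accounts().requestphoneverification(
    merchantId=merchant_id, accountId=account_id,
    body={
        'phoneNumber': '+15551234567',     # placeholder
        'phoneRegionCode': 'US',
        'languageCode': 'en-US',
        'phoneVerificationMethod': 'SMS',  # assumed enum value
    }).execute()

service.accounts().verifyphonenumber(
    merchantId=merchant_id, accountId=account_id,
    body={
        'verificationId': request['verificationId'],  # assumed response field
        'verificationCode': '123456',                 # code received by SMS
        'phoneVerificationMethod': 'SMS',
    }).execute()

# 2) Request an eCommercePlatform link, using the new externalAccountId.
service.accounts().link(
    merchantId=merchant_id, accountId=account_id,
    body={
        'action': 'request',
        'linkType': 'eCommercePlatform',
        'linkedAccountId': '7654321',  # placeholder
        'eCommercePlatformLinkInfo': {'externalAccountId': 'shop-42'},
    }).execute()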
diff --git a/docs/dyn/content_v2_1.localinventory.html b/docs/dyn/content_v2_1.localinventory.html
index 6e38ec2e4..06b2d6f8a 100644
--- a/docs/dyn/content_v2_1.localinventory.html
+++ b/docs/dyn/content_v2_1.localinventory.html
@@ -105,7 +105,7 @@ Args:
&quot;availability&quot;: &quot;A String&quot;, # Availability of the product. For accepted attribute values, see the local product inventory feed specification.
&quot;instoreProductLocation&quot;: &quot;A String&quot;, # In-store product location.
&quot;kind&quot;: &quot;A String&quot;, # Identifies what kind of resource this is. Value: the fixed string &quot;`content#localInventory`&quot;
- &quot;pickupMethod&quot;: &quot;A String&quot;, # Supported pickup method for this offer. Unless the value is &quot;not supported&quot;, this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification.
+ &quot;pickupMethod&quot;: &quot;A String&quot;, # Supported pickup method for this offer. Unless the value is &quot;not supported&quot;, this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification.
&quot;pickupSla&quot;: &quot;A String&quot;, # Expected date that an order will be ready for pickup relative to the order date. Must be submitted together with `pickupMethod`. For accepted attribute values, see the local product inventory feed specification.
&quot;price&quot;: { # Price of the product.
&quot;currency&quot;: &quot;A String&quot;, # The currency of the price.
@@ -170,7 +170,7 @@ Args:
&quot;availability&quot;: &quot;A String&quot;, # Availability of the product. For accepted attribute values, see the local product inventory feed specification.
&quot;instoreProductLocation&quot;: &quot;A String&quot;, # In-store product location.
&quot;kind&quot;: &quot;A String&quot;, # Identifies what kind of resource this is. Value: the fixed string &quot;`content#localInventory`&quot;
- &quot;pickupMethod&quot;: &quot;A String&quot;, # Supported pickup method for this offer. Unless the value is &quot;not supported&quot;, this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification.
+ &quot;pickupMethod&quot;: &quot;A String&quot;, # Supported pickup method for this offer. Unless the value is &quot;not supported&quot;, this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification.
&quot;pickupSla&quot;: &quot;A String&quot;, # Expected date that an order will be ready for pickup relative to the order date. Must be submitted together with `pickupMethod`. For accepted attribute values, see the local product inventory feed specification.
&quot;price&quot;: { # Price of the product.
&quot;currency&quot;: &quot;A String&quot;, # The currency of the price.
@@ -197,7 +197,7 @@ Returns:
&quot;availability&quot;: &quot;A String&quot;, # Availability of the product. For accepted attribute values, see the local product inventory feed specification.
&quot;instoreProductLocation&quot;: &quot;A String&quot;, # In-store product location.
&quot;kind&quot;: &quot;A String&quot;, # Identifies what kind of resource this is. Value: the fixed string &quot;`content#localInventory`&quot;
- &quot;pickupMethod&quot;: &quot;A String&quot;, # Supported pickup method for this offer. Unless the value is &quot;not supported&quot;, this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification.
+ &quot;pickupMethod&quot;: &quot;A String&quot;, # Supported pickup method for this offer. Unless the value is &quot;not supported&quot;, this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification.
&quot;pickupSla&quot;: &quot;A String&quot;, # Expected date that an order will be ready for pickup relative to the order date. Must be submitted together with `pickupMethod`. For accepted attribute values, see the local product inventory feed specification.
&quot;price&quot;: { # Price of the product.
&quot;currency&quot;: &quot;A String&quot;, # The currency of the price.
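Since `pickupMethod` must be submitted together with `pickupSla`, here is a small sketch of a local inventory update that sets both; the merchant ID, product ID, store code, and attribute values are placeholders taken loosely from the local product inventory feed specification:

from googleapiclient.discovery import build

service = build('content', 'v2.1')  # assumes Application Default Credentials

body = {
    'storeCode': 'store-001',   # placeholder
    'availability': 'in stock',
    # Any value other than 'not supported' requires pickupSla as well.
    'pickupMethod': 'buy',
    'pickupSla': 'same day',
    'price': {'value': '19.99', 'currency': 'USD'},
}

service.localinventory().insert(
    merchantId='1234567',              # placeholder
    productId='online:en:US:sku-123',  # placeholder REST ID
    body=body).execute()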
diff --git a/docs/dyn/content_v2_1.orders.html b/docs/dyn/content_v2_1.orders.html
index b36f13609..0b98ab123 100644
--- a/docs/dyn/content_v2_1.orders.html
+++ b/docs/dyn/content_v2_1.orders.html
@@ -91,7 +91,7 @@ h1, h2, h3 {
<p class="firstline">Sandbox only. Cancels a test order for customer-initiated cancellation.</p>
<p class="toc_element">
<code><a href="#captureOrder">captureOrder(merchantId, orderId, body=None, x__xgafv=None)</a></code></p>
-<p class="firstline">Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processsor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped.</p>
+<p class="firstline">Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processsor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped. A rejected error code is returned when the payment service provider has declined the charge. This indicates a problem between the PSP and either the merchant's or customer's account. Sometimes this error will be resolved by the customer. We recommend retrying these errors once per day or cancelling the order with reason `failedToCaptureFunds` if the items cannot be held.</p>
<p class="toc_element">
<code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
@@ -207,7 +207,7 @@ Args:
{
&quot;operationId&quot;: &quot;A String&quot;, # The ID of the operation. Unique across all operations for a given order.
- &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Acceptable values are: - &quot;`customerInitiatedCancel`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`noInventory`&quot; - &quot;`other`&quot; - &quot;`priceError`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot;
+ &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Acceptable values are: - &quot;`customerInitiatedCancel`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`noInventory`&quot; - &quot;`other`&quot; - &quot;`priceError`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot; - &quot;`failedToCaptureFunds`&quot;
&quot;reasonText&quot;: &quot;A String&quot;, # The explanation of the reason.
}
@@ -240,7 +240,7 @@ Args:
&quot;operationId&quot;: &quot;A String&quot;, # The ID of the operation. Unique across all operations for a given order.
&quot;productId&quot;: &quot;A String&quot;, # The ID of the product to cancel. This is the REST ID used in the products service. Either lineItemId or productId is required.
&quot;quantity&quot;: 42, # The quantity to cancel.
- &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Acceptable values are: - &quot;`customerInitiatedCancel`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`noInventory`&quot; - &quot;`other`&quot; - &quot;`priceError`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot;
+ &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Acceptable values are: - &quot;`customerInitiatedCancel`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`noInventory`&quot; - &quot;`other`&quot; - &quot;`priceError`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot; - &quot;`failedToCaptureFunds`&quot;
&quot;reasonText&quot;: &quot;A String&quot;, # The explanation of the reason.
}
@@ -287,7 +287,7 @@ Returns:
<div class="method">
<code class="details" id="captureOrder">captureOrder(merchantId, orderId, body=None, x__xgafv=None)</code>
- <pre>Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processsor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped.
+ <pre>Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be canceled. This method cannot be called after the entire order is already shipped. A rejected error code is returned when the payment service provider has declined the charge. This indicates a problem between the PSP and either the merchant&#x27;s or customer&#x27;s account. Sometimes this error will be resolved by the customer. We recommend retrying these errors once per day or canceling the order with reason `failedToCaptureFunds` if the items cannot be held.
Args:
merchantId: string, Required. The ID of the account that manages the order. This cannot be a multi-client account. (required)
@@ -617,7 +617,7 @@ Returns:
&quot;actor&quot;: &quot;A String&quot;, # The actor that created the cancellation. Acceptable values are: - &quot;`customer`&quot; - &quot;`googleBot`&quot; - &quot;`googleCustomerService`&quot; - &quot;`googlePayments`&quot; - &quot;`googleSabre`&quot; - &quot;`merchant`&quot;
&quot;creationDate&quot;: &quot;A String&quot;, # Date on which the cancellation has been created, in ISO 8601 format.
&quot;quantity&quot;: 42, # The quantity that was canceled.
- &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - &quot;`autoPostInternal`&quot; - &quot;`autoPostInvalidBillingAddress`&quot; - &quot;`autoPostNoInventory`&quot; - &quot;`autoPostPriceError`&quot; - &quot;`autoPostUndeliverableShippingAddress`&quot; - &quot;`couponAbuse`&quot; - &quot;`customerCanceled`&quot; - &quot;`customerInitiatedCancel`&quot; - &quot;`customerSupportRequested`&quot; - &quot;`failToPushOrderGoogleError`&quot; - &quot;`failToPushOrderMerchantError`&quot; - &quot;`failToPushOrderMerchantFulfillmentError`&quot; - &quot;`failToPushOrderToMerchant`&quot; - &quot;`failToPushOrderToMerchantOutOfStock`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`merchantDidNotShipOnTime`&quot; - &quot;`noInventory`&quot; - &quot;`orderTimeout`&quot; - &quot;`other`&quot; - &quot;`paymentAbuse`&quot; - &quot;`paymentDeclined`&quot; - &quot;`priceError`&quot; - &quot;`returnRefundAbuse`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot;
+ &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - &quot;`autoPostInternal`&quot; - &quot;`autoPostInvalidBillingAddress`&quot; - &quot;`autoPostNoInventory`&quot; - &quot;`autoPostPriceError`&quot; - &quot;`autoPostUndeliverableShippingAddress`&quot; - &quot;`couponAbuse`&quot; - &quot;`customerCanceled`&quot; - &quot;`customerInitiatedCancel`&quot; - &quot;`customerSupportRequested`&quot; - &quot;`failToPushOrderGoogleError`&quot; - &quot;`failToPushOrderMerchantError`&quot; - &quot;`failToPushOrderMerchantFulfillmentError`&quot; - &quot;`failToPushOrderToMerchant`&quot; - &quot;`failToPushOrderToMerchantOutOfStock`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`merchantDidNotShipOnTime`&quot; - &quot;`noInventory`&quot; - &quot;`orderTimeout`&quot; - &quot;`other`&quot; - &quot;`paymentAbuse`&quot; - &quot;`paymentDeclined`&quot; - &quot;`priceError`&quot; - &quot;`returnRefundAbuse`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot; - &quot;`failedToCaptureFunds`&quot;
&quot;reasonText&quot;: &quot;A String&quot;, # The explanation of the reason.
},
],
@@ -915,7 +915,7 @@ Returns:
&quot;actor&quot;: &quot;A String&quot;, # The actor that created the cancellation. Acceptable values are: - &quot;`customer`&quot; - &quot;`googleBot`&quot; - &quot;`googleCustomerService`&quot; - &quot;`googlePayments`&quot; - &quot;`googleSabre`&quot; - &quot;`merchant`&quot;
&quot;creationDate&quot;: &quot;A String&quot;, # Date on which the cancellation has been created, in ISO 8601 format.
&quot;quantity&quot;: 42, # The quantity that was canceled.
- &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - &quot;`autoPostInternal`&quot; - &quot;`autoPostInvalidBillingAddress`&quot; - &quot;`autoPostNoInventory`&quot; - &quot;`autoPostPriceError`&quot; - &quot;`autoPostUndeliverableShippingAddress`&quot; - &quot;`couponAbuse`&quot; - &quot;`customerCanceled`&quot; - &quot;`customerInitiatedCancel`&quot; - &quot;`customerSupportRequested`&quot; - &quot;`failToPushOrderGoogleError`&quot; - &quot;`failToPushOrderMerchantError`&quot; - &quot;`failToPushOrderMerchantFulfillmentError`&quot; - &quot;`failToPushOrderToMerchant`&quot; - &quot;`failToPushOrderToMerchantOutOfStock`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`merchantDidNotShipOnTime`&quot; - &quot;`noInventory`&quot; - &quot;`orderTimeout`&quot; - &quot;`other`&quot; - &quot;`paymentAbuse`&quot; - &quot;`paymentDeclined`&quot; - &quot;`priceError`&quot; - &quot;`returnRefundAbuse`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot;
+ &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - &quot;`autoPostInternal`&quot; - &quot;`autoPostInvalidBillingAddress`&quot; - &quot;`autoPostNoInventory`&quot; - &quot;`autoPostPriceError`&quot; - &quot;`autoPostUndeliverableShippingAddress`&quot; - &quot;`couponAbuse`&quot; - &quot;`customerCanceled`&quot; - &quot;`customerInitiatedCancel`&quot; - &quot;`customerSupportRequested`&quot; - &quot;`failToPushOrderGoogleError`&quot; - &quot;`failToPushOrderMerchantError`&quot; - &quot;`failToPushOrderMerchantFulfillmentError`&quot; - &quot;`failToPushOrderToMerchant`&quot; - &quot;`failToPushOrderToMerchantOutOfStock`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`merchantDidNotShipOnTime`&quot; - &quot;`noInventory`&quot; - &quot;`orderTimeout`&quot; - &quot;`other`&quot; - &quot;`paymentAbuse`&quot; - &quot;`paymentDeclined`&quot; - &quot;`priceError`&quot; - &quot;`returnRefundAbuse`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot; - &quot;`failedToCaptureFunds`&quot;
&quot;reasonText&quot;: &quot;A String&quot;, # The explanation of the reason.
},
],
@@ -1448,7 +1448,7 @@ Returns:
&quot;actor&quot;: &quot;A String&quot;, # The actor that created the cancellation. Acceptable values are: - &quot;`customer`&quot; - &quot;`googleBot`&quot; - &quot;`googleCustomerService`&quot; - &quot;`googlePayments`&quot; - &quot;`googleSabre`&quot; - &quot;`merchant`&quot;
&quot;creationDate&quot;: &quot;A String&quot;, # Date on which the cancellation has been created, in ISO 8601 format.
&quot;quantity&quot;: 42, # The quantity that was canceled.
- &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - &quot;`autoPostInternal`&quot; - &quot;`autoPostInvalidBillingAddress`&quot; - &quot;`autoPostNoInventory`&quot; - &quot;`autoPostPriceError`&quot; - &quot;`autoPostUndeliverableShippingAddress`&quot; - &quot;`couponAbuse`&quot; - &quot;`customerCanceled`&quot; - &quot;`customerInitiatedCancel`&quot; - &quot;`customerSupportRequested`&quot; - &quot;`failToPushOrderGoogleError`&quot; - &quot;`failToPushOrderMerchantError`&quot; - &quot;`failToPushOrderMerchantFulfillmentError`&quot; - &quot;`failToPushOrderToMerchant`&quot; - &quot;`failToPushOrderToMerchantOutOfStock`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`merchantDidNotShipOnTime`&quot; - &quot;`noInventory`&quot; - &quot;`orderTimeout`&quot; - &quot;`other`&quot; - &quot;`paymentAbuse`&quot; - &quot;`paymentDeclined`&quot; - &quot;`priceError`&quot; - &quot;`returnRefundAbuse`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot;
+ &quot;reason&quot;: &quot;A String&quot;, # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - &quot;`autoPostInternal`&quot; - &quot;`autoPostInvalidBillingAddress`&quot; - &quot;`autoPostNoInventory`&quot; - &quot;`autoPostPriceError`&quot; - &quot;`autoPostUndeliverableShippingAddress`&quot; - &quot;`couponAbuse`&quot; - &quot;`customerCanceled`&quot; - &quot;`customerInitiatedCancel`&quot; - &quot;`customerSupportRequested`&quot; - &quot;`failToPushOrderGoogleError`&quot; - &quot;`failToPushOrderMerchantError`&quot; - &quot;`failToPushOrderMerchantFulfillmentError`&quot; - &quot;`failToPushOrderToMerchant`&quot; - &quot;`failToPushOrderToMerchantOutOfStock`&quot; - &quot;`invalidCoupon`&quot; - &quot;`malformedShippingAddress`&quot; - &quot;`merchantDidNotShipOnTime`&quot; - &quot;`noInventory`&quot; - &quot;`orderTimeout`&quot; - &quot;`other`&quot; - &quot;`paymentAbuse`&quot; - &quot;`paymentDeclined`&quot; - &quot;`priceError`&quot; - &quot;`returnRefundAbuse`&quot; - &quot;`shippingPriceError`&quot; - &quot;`taxError`&quot; - &quot;`undeliverableShippingAddress`&quot; - &quot;`unsupportedPoBoxAddress`&quot; - &quot;`failedToCaptureFunds`&quot;
&quot;reasonText&quot;: &quot;A String&quot;, # The explanation of the reason.
},
],
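A sketch of the capture-and-fallback flow the new `captureOrder` text recommends: attempt the capture, and if the payment service provider keeps declining the charge, cancel with the new `failedToCaptureFunds` reason. The IDs are placeholders and the error handling is deliberately coarse; a real integration would inspect the returned error code and retry roughly once per day before giving up:

from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

service = build('content', 'v2.1')  # assumes Application Default Credentials
merchant_id = '1234567'             # placeholder
order_id = 'TEST-0001'              # placeholder

try:
    service.orders().captureOrder(
        merchantId=merchant_id, orderId=order_id).execute()
except HttpError:
    # After exhausting the recommended once-per-day retries, release the
    # order with the reason introduced in this change.
    service.orders().cancel(
        merchantId=merchant_id, orderId=order_id,
        body={
            'operationId': 'cancel-1',  # unique per order
            'reason': 'failedToCaptureFunds',
            'reasonText': 'Payment service provider declined the charge.',
        }).execute()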
diff --git a/docs/dyn/content_v2_1.promotions.html b/docs/dyn/content_v2_1.promotions.html
index ea0a830e4..a1088f7c1 100644
--- a/docs/dyn/content_v2_1.promotions.html
+++ b/docs/dyn/content_v2_1.promotions.html
@@ -148,6 +148,12 @@ Args:
&quot;orderLimit&quot;: 42, # Order limit for the promotion.
&quot;percentOff&quot;: 42, # The percentage discount offered in the promotion.
&quot;productApplicability&quot;: &quot;A String&quot;, # Required. Applicability of the promotion to either all products or only specific products.
+ &quot;productType&quot;: [ # Product filter by product type for the promotion.
+ &quot;A String&quot;,
+ ],
+ &quot;productTypeExclusion&quot;: [ # Product filter by product type exclusion for the promotion.
+ &quot;A String&quot;,
+ ],
&quot;promotionDestinationIds&quot;: [ # Destination ID for the promotion.
&quot;A String&quot;,
],
@@ -224,6 +230,12 @@ Returns:
&quot;orderLimit&quot;: 42, # Order limit for the promotion.
&quot;percentOff&quot;: 42, # The percentage discount offered in the promotion.
&quot;productApplicability&quot;: &quot;A String&quot;, # Required. Applicability of the promotion to either all products or only specific products.
+ &quot;productType&quot;: [ # Product filter by product type for the promotion.
+ &quot;A String&quot;,
+ ],
+ &quot;productTypeExclusion&quot;: [ # Product filter by product type exclusion for the promotion.
+ &quot;A String&quot;,
+ ],
&quot;promotionDestinationIds&quot;: [ # Destination ID for the promotion.
&quot;A String&quot;,
],
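The new `productType`/`productTypeExclusion` filters in context; only the filter-related fields are shown, the remaining required `Promotion` fields (ID, titles, dates, and so on) are elided, and the enum spelling for `productApplicability` is an assumption:

from googleapiclient.discovery import build

service = build('content', 'v2.1')  # assumes Application Default Credentials

body = {
    # ... other required Promotion fields elided ...
    'productApplicability': 'SPECIFIC_PRODUCTS',  # assumed enum value
    'productType': ['Apparel & Accessories > Shoes'],
    'productTypeExclusion': ['Apparel & Accessories > Shoes > Sandals'],
}

service.promotions().create(merchantId='1234567', body=body).execute()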
diff --git a/docs/dyn/datastore_v1.projects.html b/docs/dyn/datastore_v1.projects.html
index 96acf9cb8..4f71a3fd4 100644
--- a/docs/dyn/datastore_v1.projects.html
+++ b/docs/dyn/datastore_v1.projects.html
@@ -248,40 +248,7 @@ Args:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
- &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
- &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
- # Object with schema name: Value
- ],
- },
- &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
- &quot;booleanValue&quot;: True or False, # A boolean value.
- &quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
- &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
- &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
- &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
- },
- &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
- &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
- &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
- &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
- &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
- },
+ &quot;a_key&quot;: # Object with schema name: Value
},
},
&quot;update&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to update. The entity must already exist. Must have a complete key path.
@@ -299,40 +266,7 @@ Args:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
- &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
- &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
- # Object with schema name: Value
- ],
- },
- &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
- &quot;booleanValue&quot;: True or False, # A boolean value.
- &quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
- &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
- &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
- &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
- },
- &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
- &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
- &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
- &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
- &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
- },
+ &quot;a_key&quot;: # Object with schema name: Value
},
},
&quot;upsert&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to upsert. The entity may or may not already exist. The entity key&#x27;s final path element may be incomplete.
@@ -350,40 +284,7 @@ Args:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
- &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
- &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
- # Object with schema name: Value
- ],
- },
- &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
- &quot;booleanValue&quot;: True or False, # A boolean value.
- &quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
- &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
- &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
- &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
- },
- &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
- &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
- &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
- &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
- &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
- },
+ &quot;a_key&quot;: # Object with schema name: Value
},
},
},
@@ -602,40 +503,7 @@ Returns:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
- &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
- &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
- # Object with schema name: Value
- ],
- },
- &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
- &quot;booleanValue&quot;: True or False, # A boolean value.
- &quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
- &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
- &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
- &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
- },
- &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
- &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
- &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
- &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
- &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
- },
+ &quot;a_key&quot;: # Object with schema name: Value
},
},
&quot;version&quot;: &quot;A String&quot;, # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads.
@@ -659,40 +527,7 @@ Returns:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
- &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
- &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
- # Object with schema name: Value
- ],
- },
- &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
- &quot;booleanValue&quot;: True or False, # A boolean value.
- &quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
- &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
- &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
- &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
- },
- &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
- &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
- &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
- &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
- &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
- },
+ &quot;a_key&quot;: # Object with schema name: Value
},
},
&quot;version&quot;: &quot;A String&quot;, # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads.
@@ -790,7 +625,24 @@ Args:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
+ &quot;a_key&quot;: # Object with schema name: Value
+ },
+ },
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
@@ -829,7 +681,24 @@ Args:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
+ &quot;a_key&quot;: # Object with schema name: Value
+ },
+ },
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
@@ -890,7 +759,24 @@ Args:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
+ &quot;a_key&quot;: # Object with schema name: Value
+ },
+ },
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
@@ -977,40 +863,7 @@ Returns:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
- &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
- &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
- # Object with schema name: Value
- ],
- },
- &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
- &quot;booleanValue&quot;: True or False, # A boolean value.
- &quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
- &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
- &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
- &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
- },
- &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
- &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
- &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
- &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
- &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
- },
+ &quot;a_key&quot;: # Object with schema name: Value
},
},
&quot;version&quot;: &quot;A String&quot;, # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads.
@@ -1049,7 +902,24 @@ Returns:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
+ &quot;a_key&quot;: # Object with schema name: Value
+ },
+ },
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
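[Editorial note, not part of the recorded diff.] The datastore_v1 hunks above collapse the fully expanded `Value` map under `properties` back to the generator's recursive-schema placeholder (`# Object with schema name: Value`) and instead unroll `entityValue` one level: a `Value` may hold an `entityValue` whose `properties` are again `Value` messages, so the generated docs can only expand the recursion to a fixed depth. A minimal sketch of that recursive shape, assuming application-default credentials and a hypothetical project ID "my-project" (neither taken from this diff):

from googleapiclient.discovery import build

# Discovery client for the service documented in
# docs/dyn/datastore_v1.projects.html above.
datastore = build("datastore", "v1")

entity = {
    "key": {
        "partitionId": {"projectId": "my-project"},
        # Incomplete final path element: Datastore allocates the ID.
        "path": [{"kind": "Task"}],
    },
    "properties": {
        # Each map value is a Value message holding one typed field.
        "done": {"booleanValue": False},
        "location": {"geoPointValue": {"latitude": 52.52, "longitude": 13.405}},
        "detail": {
            # entityValue nests a full Entity; its properties are again
            # Value messages, which is the recursion the docs flatten.
            "entityValue": {
                "properties": {"note": {"stringValue": "embedded entity"}}
            },
            "excludeFromIndexes": True,
        },
    },
}

response = (
    datastore.projects()
    .commit(
        projectId="my-project",
        body={"mode": "NON_TRANSACTIONAL", "mutations": [{"insert": entity}]},
    )
    .execute()
)
# The mutation result carries the allocated key for the inserted entity.
print(response["mutationResults"][0].get("key"))
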
diff --git a/docs/dyn/datastore_v1beta3.projects.html b/docs/dyn/datastore_v1beta3.projects.html
index 5bd9b3632..68130019c 100644
--- a/docs/dyn/datastore_v1beta3.projects.html
+++ b/docs/dyn/datastore_v1beta3.projects.html
@@ -232,7 +232,40 @@ Args:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
+ &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
+ &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
+ &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
+ # Object with schema name: Value
+ ],
+ },
+ &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
+ &quot;booleanValue&quot;: True or False, # A boolean value.
+ &quot;doubleValue&quot;: 3.14, # A double value.
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
+ &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+ &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
+ &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
+ &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
+ &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
+ &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
+ },
},
},
&quot;update&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to update. The entity must already exist. Must have a complete key path.
@@ -250,7 +283,40 @@ Args:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
+ &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
+ &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
+ &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
+ # Object with schema name: Value
+ ],
+ },
+ &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
+ &quot;booleanValue&quot;: True or False, # A boolean value.
+ &quot;doubleValue&quot;: 3.14, # A double value.
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
+ &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+ &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
+ &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
+ &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
+ &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
+ &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
+ },
},
},
&quot;upsert&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to upsert. The entity may or may not already exist. The entity key&#x27;s final path element may be incomplete.
@@ -268,7 +334,40 @@ Args:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
+ &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
+ &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
+ &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
+ # Object with schema name: Value
+ ],
+ },
+ &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
+ &quot;booleanValue&quot;: True or False, # A boolean value.
+ &quot;doubleValue&quot;: 3.14, # A double value.
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
+ &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+ &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
+ &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
+ &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
+ &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed), may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
+ &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
+ },
},
},
},
@@ -381,7 +480,40 @@ Returns:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
+ &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
+ &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
+ &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
+ # Object with schema name: Value
+ ],
+ },
+ &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
+ &quot;booleanValue&quot;: True or False, # A boolean value.
+ &quot;doubleValue&quot;: 3.14, # A double value.
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
+ &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+ &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
+ &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace; however, the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}`. If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
+ &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
+ &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed), may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
+ &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
+ },
},
},
&quot;version&quot;: &quot;A String&quot;, # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads.
@@ -405,7 +537,40 @@ Returns:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
+ &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
+ &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
+ &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
+ # Object with schema name: Value
+ ],
+ },
+ &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
+ &quot;booleanValue&quot;: True or False, # A boolean value.
+ &quot;doubleValue&quot;: 3.14, # A double value.
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
+ &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+ &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
+ &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace; however, the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}`. If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
+ &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
+ &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed), may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
+ &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
+ },
},
},
&quot;version&quot;: &quot;A String&quot;, # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads.
@@ -503,24 +668,7 @@ Args:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
- },
- },
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
@@ -559,24 +707,7 @@ Args:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
- },
- },
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
@@ -637,24 +768,7 @@ Args:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
- },
- },
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
@@ -741,7 +855,40 @@ Returns:
],
},
&quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
+ &quot;a_key&quot;: { # A message that can hold any of the supported value types and associated metadata.
+ &quot;arrayValue&quot;: { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
+ &quot;values&quot;: [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for &#x27;exclude_from_indexes&#x27;.
+ # Object with schema name: Value
+ ],
+ },
+ &quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
+ &quot;booleanValue&quot;: True or False, # A boolean value.
+ &quot;doubleValue&quot;: 3.14, # A double value.
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
+ &quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
+ &quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+ &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ &quot;integerValue&quot;: &quot;A String&quot;, # An integer value.
+ &quot;keyValue&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value.
+ &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace; however, the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}`. If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
+ &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
+ &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
+ },
+ &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
+ { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
+ &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
+ &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
+ },
+ ],
+ },
+ &quot;meaning&quot;: 42, # The `meaning` field should only be populated for backwards compatibility.
+ &quot;nullValue&quot;: &quot;A String&quot;, # A null value.
+ &quot;stringValue&quot;: &quot;A String&quot;, # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed), may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
+ &quot;timestampValue&quot;: &quot;A String&quot;, # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
+ },
},
},
&quot;version&quot;: &quot;A String&quot;, # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads.
@@ -780,24 +927,7 @@ Returns:
&quot;blobValue&quot;: &quot;A String&quot;, # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
&quot;booleanValue&quot;: True or False, # A boolean value.
&quot;doubleValue&quot;: 3.14, # A double value.
- &quot;entityValue&quot;: { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
- &quot;key&quot;: { # A unique identifier for an entity. If a key&#x27;s partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity&#x27;s key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity&#x27;s kind is its key path&#x27;s last element&#x27;s kind, or null if it has no key.
- &quot;partitionId&quot;: { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `&quot;&quot;`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
- &quot;namespaceId&quot;: &quot;A String&quot;, # If not empty, the ID of the namespace to which the entities belong.
- &quot;projectId&quot;: &quot;A String&quot;, # The ID of the project to which the entities belong.
- },
- &quot;path&quot;: [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element&#x27;s _ancestors_. An entity path is always fully complete: *all* of the entity&#x27;s ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
- { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
- &quot;id&quot;: &quot;A String&quot;, # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
- &quot;kind&quot;: &quot;A String&quot;, # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- &quot;name&quot;: &quot;A String&quot;, # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `&quot;&quot;`.
- },
- ],
- },
- &quot;properties&quot;: { # The entity&#x27;s properties. The map&#x27;s keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `&quot;&quot;`.
- &quot;a_key&quot;: # Object with schema name: Value
- },
- },
+ &quot;entityValue&quot;: # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key.
&quot;excludeFromIndexes&quot;: True or False, # If the value should be excluded from all indexes including those defined explicitly.
&quot;geoPointValue&quot;: { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
&quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
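The expanded `Value` message above is what the generated Python client returns for each entry in an entity's `properties` map. A minimal sketch of reading those values through the discovery client, assuming the Datastore v1 API, Application Default Credentials, and a hypothetical project ID and key:

    from googleapiclient.discovery import build

    # Build the Datastore v1 client; credentials are assumed to come from
    # Application Default Credentials.
    datastore = build("datastore", "v1")

    project_id = "my-project"  # hypothetical project ID
    body = {
        "keys": [{
            "partitionId": {"projectId": project_id},
            # Hypothetical kind and name; a complete path element ends in
            # either an id or a name.
            "path": [{"kind": "Task", "name": "sample-task"}],
        }],
    }
    response = datastore.projects().lookup(projectId=project_id, body=body).execute()

    for result in response.get("found", []):
        for prop, value in result["entity"].get("properties", {}).items():
            # Each property is a Value message with exactly one *Value field set.
            if "stringValue" in value:
                print(prop, "=", value["stringValue"])
            elif "integerValue" in value:
                # integerValue is serialized as a string on the wire.
                print(prop, "=", int(value["integerValue"]))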
diff --git a/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html b/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html
index 1c848046b..8a3755210 100644
--- a/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html
+++ b/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html
@@ -1591,6 +1591,10 @@ Returns:
&quot;payload&quot;: { # Returns a response containing a custom, platform-specific payload.
&quot;a_key&quot;: &quot;&quot;, # Properties of the object.
},
+ &quot;telephonyTransferCall&quot;: { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
+ &quot;sipUri&quot;: &quot;A String&quot;, # Transfer the call to a SIP endpoint.
+ },
&quot;text&quot;: { # The text response message. # Returns a text response.
&quot;text&quot;: [ # A collection of text responses.
&quot;A String&quot;,
diff --git a/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html b/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html
index 8f87ab7ad..fbe74bbd9 100644
--- a/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html
+++ b/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html
@@ -1591,6 +1591,10 @@ Returns:
&quot;payload&quot;: { # Returns a response containing a custom, platform-specific payload.
&quot;a_key&quot;: &quot;&quot;, # Properties of the object.
},
+ &quot;telephonyTransferCall&quot;: { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
+ &quot;phoneNumber&quot;: &quot;A String&quot;, # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
+ &quot;sipUri&quot;: &quot;A String&quot;, # Transfer the call to a SIP endpoint.
+ },
&quot;text&quot;: { # The text response message. # Returns a text response.
&quot;text&quot;: [ # A collection of text responses.
&quot;A String&quot;,
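Both participants documents gain the same `telephonyTransferCall` response message. A minimal sketch of handling it client-side, assuming `response_messages` is the list of response message dicts returned by the participants resource (for example from `analyzeContent`):

    def handle_response_message(msg):
        # telephonyTransferCall signals that the connected call should be
        # handed off; whichever of phoneNumber or sipUri is set names the
        # third-party endpoint.
        transfer = msg.get("telephonyTransferCall")
        if transfer is not None:
            print("Transfer call to:",
                  transfer.get("phoneNumber") or transfer.get("sipUri"))
        elif "text" in msg:
            for line in msg["text"].get("text", []):
                print("Agent says:", line)

    for msg in response_messages:  # response_messages is assumed, see above
        handle_response_message(msg)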
diff --git a/docs/dyn/dialogflow_v3.projects.locations.agents.environments.deployments.html b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.deployments.html
new file mode 100644
index 000000000..ca466cb26
--- /dev/null
+++ b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.deployments.html
@@ -0,0 +1,174 @@
+<html><body>
+<style>
+
+body, h1, h2, h3, div, span, p, pre, a {
+ margin: 0;
+ padding: 0;
+ border: 0;
+ font-weight: inherit;
+ font-style: inherit;
+ font-size: 100%;
+ font-family: inherit;
+ vertical-align: baseline;
+}
+
+body {
+ font-size: 13px;
+ padding: 1em;
+}
+
+h1 {
+ font-size: 26px;
+ margin-bottom: 1em;
+}
+
+h2 {
+ font-size: 24px;
+ margin-bottom: 1em;
+}
+
+h3 {
+ font-size: 20px;
+ margin-bottom: 1em;
+ margin-top: 1em;
+}
+
+pre, code {
+ line-height: 1.5;
+ font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
+}
+
+pre {
+ margin-top: 0.5em;
+}
+
+h1, h2, h3, p {
+  font-family: Arial, sans-serif;
+}
+
+h1, h2, h3 {
+ border-bottom: solid #CCC 1px;
+}
+
+.toc_element {
+ margin-top: 0.5em;
+}
+
+.firstline {
+  margin-left: 2em;
+}
+
+.method {
+ margin-top: 1em;
+ border: solid 1px #CCC;
+ padding: 1em;
+ background: #EEE;
+}
+
+.details {
+ font-weight: bold;
+ font-size: 14px;
+}
+
+</style>
+
+<h1><a href="dialogflow_v3.html">Dialogflow API</a> . <a href="dialogflow_v3.projects.html">projects</a> . <a href="dialogflow_v3.projects.locations.html">locations</a> . <a href="dialogflow_v3.projects.locations.agents.html">agents</a> . <a href="dialogflow_v3.projects.locations.agents.environments.html">environments</a> . <a href="dialogflow_v3.projects.locations.agents.environments.deployments.html">deployments</a></h1>
+<h2>Instance Methods</h2>
+<p class="toc_element">
+ <code><a href="#close">close()</a></code></p>
+<p class="firstline">Close httplib2 connections.</p>
+<p class="toc_element">
+ <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
+<p class="firstline">Retrieves the specified Deployment.</p>
+<p class="toc_element">
+ <code><a href="#list">list(parent, pageSize=None, pageToken=None, x__xgafv=None)</a></code></p>
+<p class="firstline">Returns the list of all deployments in the specified Environment.</p>
+<p class="toc_element">
+ <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
+<p class="firstline">Retrieves the next page of results.</p>
+<h3>Method Details</h3>
+<div class="method">
+ <code class="details" id="close">close()</code>
+ <pre>Close httplib2 connections.</pre>
+</div>
+
+<div class="method">
+ <code class="details" id="get">get(name, x__xgafv=None)</code>
+ <pre>Retrieves the specified Deployment.
+
+Args:
+ name: string, Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`. (required)
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps, e.g. running validation test cases or experiment auto-rollout.
+ &quot;endTime&quot;: &quot;A String&quot;, # End time of this deployment.
+ &quot;flowVersion&quot;: &quot;A String&quot;, # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+ &quot;result&quot;: { # Result of the deployment. # Result of the deployment.
+ &quot;deploymentTestResults&quot;: [ # Results of the test cases run before the deployment. Format: `projects//locations//agents//testCases//results/`.
+ &quot;A String&quot;,
+ ],
+ &quot;experiment&quot;: &quot;A String&quot;, # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+ },
+ &quot;startTime&quot;: &quot;A String&quot;, # Start time of this deployment.
+ &quot;state&quot;: &quot;A String&quot;, # The current state of the deployment.
+}</pre>
+</div>
+
+<div class="method">
+ <code class="details" id="list">list(parent, pageSize=None, pageToken=None, x__xgafv=None)</code>
+ <pre>Returns the list of all deployments in the specified Environment.
+
+Args:
+ parent: string, Required. The Environment to list all deployments for. Format: `projects//locations//agents//environments/`. (required)
+ pageSize: integer, The maximum number of items to return in a single page. Defaults to 20; at most 100.
+ pageToken: string, The next_page_token value returned from a previous list request.
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # The response message for Deployments.ListDeployments.
+ &quot;deployments&quot;: [ # The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn&#x27;t the last page.
+ { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps, e.g. running validation test cases or experiment auto-rollout.
+ &quot;endTime&quot;: &quot;A String&quot;, # End time of this deployment.
+ &quot;flowVersion&quot;: &quot;A String&quot;, # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+ &quot;result&quot;: { # Result of the deployment. # Result of the deployment.
+ &quot;deploymentTestResults&quot;: [ # Results of the test cases run before the deployment. Format: `projects//locations//agents//testCases//results/`.
+ &quot;A String&quot;,
+ ],
+ &quot;experiment&quot;: &quot;A String&quot;, # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+ },
+ &quot;startTime&quot;: &quot;A String&quot;, # Start time of this deployment.
+ &quot;state&quot;: &quot;A String&quot;, # The current state of the deployment.
+ },
+ ],
+ &quot;nextPageToken&quot;: &quot;A String&quot;, # Token to retrieve the next page of results, or empty if there are no more results in the list.
+}</pre>
+</div>
+
+<div class="method">
+ <code class="details" id="list_next">list_next(previous_request, previous_response)</code>
+ <pre>Retrieves the next page of results.
+
+Args:
+ previous_request: The request for the previous page. (required)
+ previous_response: The response from the request for the previous page. (required)
+
+Returns:
+ A request object that you can call &#x27;execute()&#x27; on to request the next
+ page. Returns None if there are no more items in the collection.
+ </pre>
+</div>
+
+</body></html> \ No newline at end of file
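A minimal sketch of exercising the new deployments resource with the generated client, assuming the Dialogflow v3 API, Application Default Credentials, and hypothetical resource names:

    from googleapiclient.discovery import build

    dialogflow = build("dialogflow", "v3")
    deployments = (dialogflow.projects().locations().agents()
                   .environments().deployments())

    # Hypothetical environment resource name.
    parent = ("projects/my-project/locations/global/agents/my-agent"
              "/environments/my-env")

    request = deployments.list(parent=parent, pageSize=20)
    while request is not None:
        response = request.execute()
        for deployment in response.get("deployments", []):
            print(deployment["name"], deployment.get("state"))
        # list_next returns None once the collection is exhausted.
        request = deployments.list_next(request, response)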
diff --git a/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html
index 9b34fd345..89d53baf1 100644
--- a/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html
+++ b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html
@@ -80,6 +80,11 @@ h1, h2, h3 {
<p class="firstline">Returns the continuousTestResults Resource.</p>
<p class="toc_element">
+ <code><a href="dialogflow_v3.projects.locations.agents.environments.deployments.html">deployments()</a></code>
+</p>
+<p class="firstline">Returns the deployments Resource.</p>
+
+<p class="toc_element">
<code><a href="dialogflow_v3.projects.locations.agents.environments.experiments.html">experiments()</a></code>
</p>
<p class="firstline">Returns the experiments Resource.</p>
@@ -99,6 +104,9 @@ h1, h2, h3 {
<code><a href="#delete">delete(name, x__xgafv=None)</a></code></p>
<p class="firstline">Deletes the specified Environment.</p>
<p class="toc_element">
+ <code><a href="#deployFlow">deployFlow(environment, body=None, x__xgafv=None)</a></code></p>
+<p class="firstline">Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse</p>
+<p class="toc_element">
<code><a href="#get">get(name, x__xgafv=None)</a></code></p>
<p class="firstline">Retrieves the specified Environment.</p>
<p class="toc_element">
@@ -138,6 +146,13 @@ Args:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Defaults to false. If set to true, the test cases run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Defaults to false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
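The new testCasesConfig block can be set when creating or patching an environment. A minimal sketch, assuming the Dialogflow v3 API, Application Default Credentials, hypothetical resource names, and that the environments resource exposes patch() with an updateMask:

    from googleapiclient.discovery import build

    dialogflow = build("dialogflow", "v3")
    environments = dialogflow.projects().locations().agents().environments()

    name = ("projects/my-project/locations/global/agents/my-agent"
            "/environments/my-env")  # hypothetical resource name
    body = {
        "testCasesConfig": {
            "enableContinuousRun": True,     # run the suite once a day
            "enablePredeploymentRun": True,  # run before each flow deployment
            "testCases": [
                # Hypothetical test case resource name.
                "projects/my-project/locations/global/agents/my-agent"
                "/testCases/my-test-case",
            ],
        },
    }
    environments.patch(name=name, updateMask="testCasesConfig",
                       body=body).execute()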
@@ -194,6 +209,48 @@ Returns:
</div>
<div class="method">
+ <code class="details" id="deployFlow">deployFlow(environment, body=None, x__xgafv=None)</code>
+ <pre>Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse
+
+Args:
+ environment: string, Required. The environment to deploy the flow to. Format: `projects//locations//agents//environments/`. (required)
+ body: object, The request body.
+ The object takes the form of:
+
+{ # The request message for Environments.DeployFlow.
+ &quot;flowVersion&quot;: &quot;A String&quot;, # Required. The flow version to deploy. Format: `projects//locations//agents//flows//versions/`.
+}
+
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # This resource represents a long-running operation that is the result of a network API call.
+ &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+ &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+ &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
+ &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+ {
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+ },
+ &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+ &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+ &quot;response&quot;: { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+}</pre>
+</div>
+
+<div class="method">
<code class="details" id="get">get(name, x__xgafv=None)</code>
<pre>Retrieves the specified Environment.
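deployFlow returns a long-running operation. A minimal sketch of starting a deployment and polling it to completion, assuming the Dialogflow v3 API, Application Default Credentials, and hypothetical resource names:

    import time

    from googleapiclient.discovery import build

    dialogflow = build("dialogflow", "v3")

    environment = ("projects/my-project/locations/global/agents/my-agent"
                   "/environments/my-env")  # hypothetical resource name
    flow_version = ("projects/my-project/locations/global/agents/my-agent"
                    "/flows/my-flow/versions/1")  # hypothetical flow version

    operation = (dialogflow.projects().locations().agents().environments()
                 .deployFlow(environment=environment,
                             body={"flowVersion": flow_version}).execute())

    # Poll the Operation until done; metadata and response carry
    # DeployFlowMetadata and DeployFlowResponse respectively.
    operations = dialogflow.projects().locations().operations()
    while not operation.get("done"):
        time.sleep(5)
        operation = operations.get(name=operation["name"]).execute()
    if "error" in operation:
        raise RuntimeError(operation["error"].get("message"))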
@@ -211,6 +268,13 @@ Returns:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Defaults to false. If set to true, the test cases run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Defaults to false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
@@ -242,6 +306,13 @@ Returns:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
@@ -290,6 +361,13 @@ Returns:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
@@ -329,6 +407,13 @@ Args:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
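For orientation, a minimal sketch of setting the new `testCasesConfig` field through this client. The project, location, agent, and test case IDs are placeholders, the `updateMask` value is an assumption, and in Dialogflow CX an environment update may itself return a long-running Operation.

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3")

    env_name = "projects/my-project/locations/us-central1/agents/my-agent/environments/my-env"
    body = {
        "testCasesConfig": {
            "enableContinuousRun": True,     # run the listed test cases once a day
            "enablePredeploymentRun": True,  # also run them before each flow deployment
            "testCases": [
                "projects/my-project/locations/us-central1/agents/my-agent/testCases/my-test-case",
            ],
        },
    }
    operation = service.projects().locations().agents().environments().patch(
        name=env_name, updateMask="testCasesConfig", body=body
    ).execute()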
diff --git a/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html b/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html
index 21c868770..383f9fa70 100644
--- a/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html
+++ b/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html
@@ -117,7 +117,7 @@ Args:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -141,7 +141,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -190,7 +190,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -226,7 +226,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -257,7 +257,7 @@ Returns:
<pre>Updates the specified SecuritySettings.
Args:
- name: string, Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. (required)
+ name: string, Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. (required)
body: object, The request body.
The object takes the form of:
@@ -268,7 +268,7 @@ Args:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -293,7 +293,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
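The reworded `name` documentation above implies a create-then-update flow like the following sketch; the parent path and display names are placeholders, and the `updateMask` value is an assumption.

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3")
    settings = service.projects().locations().securitySettings()

    created = settings.create(
        parent="projects/my-project/locations/us-central1",
        body={"displayName": "default-security-settings"},  # no `name`: the service populates it
    ).execute()

    updated = settings.patch(
        name=created["name"],        # required when updating
        updateMask="displayName",
        body={"name": created["name"], "displayName": "renamed-security-settings"},
    ).execute()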
diff --git a/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.deployments.html b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.deployments.html
new file mode 100644
index 000000000..0c253617a
--- /dev/null
+++ b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.deployments.html
@@ -0,0 +1,174 @@
+<html><body>
+<style>
+
+body, h1, h2, h3, div, span, p, pre, a {
+ margin: 0;
+ padding: 0;
+ border: 0;
+ font-weight: inherit;
+ font-style: inherit;
+ font-size: 100%;
+ font-family: inherit;
+ vertical-align: baseline;
+}
+
+body {
+ font-size: 13px;
+ padding: 1em;
+}
+
+h1 {
+ font-size: 26px;
+ margin-bottom: 1em;
+}
+
+h2 {
+ font-size: 24px;
+ margin-bottom: 1em;
+}
+
+h3 {
+ font-size: 20px;
+ margin-bottom: 1em;
+ margin-top: 1em;
+}
+
+pre, code {
+ line-height: 1.5;
+ font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
+}
+
+pre {
+ margin-top: 0.5em;
+}
+
+h1, h2, h3, p {
+ font-family: Arial, sans-serif;
+}
+
+h1, h2, h3 {
+ border-bottom: solid #CCC 1px;
+}
+
+.toc_element {
+ margin-top: 0.5em;
+}
+
+.firstline {
+ margin-left: 2em;
+}
+
+.method {
+ margin-top: 1em;
+ border: solid 1px #CCC;
+ padding: 1em;
+ background: #EEE;
+}
+
+.details {
+ font-weight: bold;
+ font-size: 14px;
+}
+
+</style>
+
+<h1><a href="dialogflow_v3beta1.html">Dialogflow API</a> . <a href="dialogflow_v3beta1.projects.html">projects</a> . <a href="dialogflow_v3beta1.projects.locations.html">locations</a> . <a href="dialogflow_v3beta1.projects.locations.agents.html">agents</a> . <a href="dialogflow_v3beta1.projects.locations.agents.environments.html">environments</a> . <a href="dialogflow_v3beta1.projects.locations.agents.environments.deployments.html">deployments</a></h1>
+<h2>Instance Methods</h2>
+<p class="toc_element">
+ <code><a href="#close">close()</a></code></p>
+<p class="firstline">Close httplib2 connections.</p>
+<p class="toc_element">
+ <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
+<p class="firstline">Retrieves the specified Deployment.</p>
+<p class="toc_element">
+ <code><a href="#list">list(parent, pageSize=None, pageToken=None, x__xgafv=None)</a></code></p>
+<p class="firstline">Returns the list of all deployments in the specified Environment.</p>
+<p class="toc_element">
+ <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
+<p class="firstline">Retrieves the next page of results.</p>
+<h3>Method Details</h3>
+<div class="method">
+ <code class="details" id="close">close()</code>
+ <pre>Close httplib2 connections.</pre>
+</div>
+
+<div class="method">
+ <code class="details" id="get">get(name, x__xgafv=None)</code>
+ <pre>Retrieves the specified Deployment.
+
+Args:
+ name: string, Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`. (required)
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps to run, e.g. validation test cases, experiment auto-rollout, etc.
+ &quot;endTime&quot;: &quot;A String&quot;, # End time of this deployment.
+ &quot;flowVersion&quot;: &quot;A String&quot;, # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+ &quot;result&quot;: { # Result of the deployment. # Result of the deployment.
+ &quot;deploymentTestResults&quot;: [ # Results of test cases run before the deployment. Format: `projects//locations//agents//testCases//results/`.
+ &quot;A String&quot;,
+ ],
+ &quot;experiment&quot;: &quot;A String&quot;, # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+ },
+ &quot;startTime&quot;: &quot;A String&quot;, # Start time of this deployment.
+ &quot;state&quot;: &quot;A String&quot;, # The current state of the deployment.
+}</pre>
+</div>
+
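A minimal sketch of calling this new resource from google-api-python-client; all IDs below are placeholders.

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3beta1")
    name = ("projects/my-project/locations/us-central1/agents/my-agent"
            "/environments/my-env/deployments/my-deployment")

    deployment = service.projects().locations().agents().environments().deployments().get(
        name=name
    ).execute()
    print(deployment.get("state"), deployment.get("flowVersion"))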
+<div class="method">
+ <code class="details" id="list">list(parent, pageSize=None, pageToken=None, x__xgafv=None)</code>
+ <pre>Returns the list of all deployments in the specified Environment.
+
+Args:
+ parent: string, Required. The Environment to list all deployments for. Format: `projects//locations//agents//environments/`. (required)
+ pageSize: integer, The maximum number of items to return in a single page. By default 20 and at most 100.
+ pageToken: string, The next_page_token value returned from a previous list request.
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # The response message for Deployments.ListDeployments.
+ &quot;deployments&quot;: [ # The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn&#x27;t the last page.
+ { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps to run, e.g. validation test cases, experiment auto-rollout, etc.
+ &quot;endTime&quot;: &quot;A String&quot;, # End time of this deployment.
+ &quot;flowVersion&quot;: &quot;A String&quot;, # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+ &quot;name&quot;: &quot;A String&quot;, # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+ &quot;result&quot;: { # Result of the deployment. # Result of the deployment.
+ &quot;deploymentTestResults&quot;: [ # Results of test cases run before the deployment. Format: `projects//locations//agents//testCases//results/`.
+ &quot;A String&quot;,
+ ],
+ &quot;experiment&quot;: &quot;A String&quot;, # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+ },
+ &quot;startTime&quot;: &quot;A String&quot;, # Start time of this deployment.
+ &quot;state&quot;: &quot;A String&quot;, # The current state of the deployment.
+ },
+ ],
+ &quot;nextPageToken&quot;: &quot;A String&quot;, # Token to retrieve the next page of results, or empty if there are no more results in the list.
+}</pre>
+</div>
+
+<div class="method">
+ <code class="details" id="list_next">list_next(previous_request, previous_response)</code>
+ <pre>Retrieves the next page of results.
+
+Args:
+ previous_request: The request for the previous page. (required)
+ previous_response: The response from the request for the previous page. (required)
+
+Returns:
+ A request object that you can call &#x27;execute()&#x27; on to request the next
+ page. Returns None if there are no more items in the collection.
+ </pre>
+</div>
+
+</body></html> \ No newline at end of file
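The list/list_next pair above follows this library's standard pagination pattern; a sketch with a placeholder parent:

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3beta1")
    deployments = service.projects().locations().agents().environments().deployments()

    request = deployments.list(
        parent="projects/my-project/locations/us-central1/agents/my-agent/environments/my-env",
        pageSize=20,
    )
    while request is not None:
        response = request.execute()
        for deployment in response.get("deployments", []):
            print(deployment["name"], deployment.get("state"))
        # list_next returns None once there are no more pages.
        request = deployments.list_next(previous_request=request, previous_response=response)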
diff --git a/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html
index 3b30c6cc5..83b03ac49 100644
--- a/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html
+++ b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html
@@ -80,6 +80,11 @@ h1, h2, h3 {
<p class="firstline">Returns the continuousTestResults Resource.</p>
<p class="toc_element">
+ <code><a href="dialogflow_v3beta1.projects.locations.agents.environments.deployments.html">deployments()</a></code>
+</p>
+<p class="firstline">Returns the deployments Resource.</p>
+
+<p class="toc_element">
<code><a href="dialogflow_v3beta1.projects.locations.agents.environments.experiments.html">experiments()</a></code>
</p>
<p class="firstline">Returns the experiments Resource.</p>
@@ -99,6 +104,9 @@ h1, h2, h3 {
<code><a href="#delete">delete(name, x__xgafv=None)</a></code></p>
<p class="firstline">Deletes the specified Environment.</p>
<p class="toc_element">
+ <code><a href="#deployFlow">deployFlow(environment, body=None, x__xgafv=None)</a></code></p>
+<p class="firstline">Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse</p>
+<p class="toc_element">
<code><a href="#get">get(name, x__xgafv=None)</a></code></p>
<p class="firstline">Retrieves the specified Environment.</p>
<p class="toc_element">
@@ -138,6 +146,13 @@ Args:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
@@ -194,6 +209,48 @@ Returns:
</div>
<div class="method">
+ <code class="details" id="deployFlow">deployFlow(environment, body=None, x__xgafv=None)</code>
+ <pre>Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse
+
+Args:
+ environment: string, Required. The environment to deploy the flow to. Format: `projects//locations//agents//environments/`. (required)
+ body: object, The request body.
+ The object takes the form of:
+
+{ # The request message for Environments.DeployFlow.
+ &quot;flowVersion&quot;: &quot;A String&quot;, # Required. The flow version to deploy. Format: `projects//locations//agents//flows//versions/`.
+}
+
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # This resource represents a long-running operation that is the result of a network API call.
+ &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+ &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+ &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
+ &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+ {
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+ },
+ &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+ &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+ &quot;response&quot;: { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+}</pre>
+</div>
+
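A minimal sketch of the new deployFlow call; the IDs are placeholders, and the returned Operation would be polled as described in the long-running-operation guide linked above.

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3beta1")
    environment = "projects/my-project/locations/us-central1/agents/my-agent/environments/my-env"

    operation = service.projects().locations().agents().environments().deployFlow(
        environment=environment,
        body={
            "flowVersion": "projects/my-project/locations/us-central1"
                           "/agents/my-agent/flows/my-flow/versions/1",
        },
    ).execute()
    print(operation["name"])  # poll until done; `response` is a DeployFlowResponse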
+<div class="method">
<code class="details" id="get">get(name, x__xgafv=None)</code>
<pre>Retrieves the specified Environment.
@@ -211,6 +268,13 @@ Returns:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
@@ -242,6 +306,13 @@ Returns:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
@@ -290,6 +361,13 @@ Returns:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
@@ -329,6 +407,13 @@ Args:
&quot;description&quot;: &quot;A String&quot;, # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.
&quot;displayName&quot;: &quot;A String&quot;, # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.
&quot;name&quot;: &quot;A String&quot;, # The name of the environment. Format: `projects//locations//agents//environments/`.
+ &quot;testCasesConfig&quot;: { # The configuration for continuous tests. # The test cases config for continuous tests of this environment.
+ &quot;enableContinuousRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.
+ &quot;enablePredeploymentRun&quot;: True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.
+ &quot;testCases&quot;: [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`
+ &quot;A String&quot;,
+ ],
+ },
&quot;updateTime&quot;: &quot;A String&quot;, # Output only. Update time of this environment.
&quot;versionConfigs&quot;: [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.
{ # Configuration for the version.
diff --git a/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html b/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html
index be27c70e5..b59f19d11 100644
--- a/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html
+++ b/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html
@@ -117,7 +117,7 @@ Args:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -141,7 +141,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -190,7 +190,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -226,7 +226,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -257,7 +257,7 @@ Returns:
<pre>Updates the specified SecuritySettings.
Args:
- name: string, Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. (required)
+ name: string, Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. (required)
body: object, The request body.
The object takes the form of:
@@ -268,7 +268,7 @@ Args:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
@@ -293,7 +293,7 @@ Returns:
&quot;enableInsightsExport&quot;: True or False, # If enabled, we will automatically export conversations to Insights, and Insights runs its analyzers.
},
&quot;inspectTemplate&quot;: &quot;A String&quot;, # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`.
- &quot;name&quot;: &quot;A String&quot;, # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.
+ &quot;name&quot;: &quot;A String&quot;, # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.
&quot;purgeDataTypes&quot;: [ # List of types of data to remove when retention settings triggers purge.
&quot;A String&quot;,
],
diff --git a/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html b/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html
index 5a73e6246..fd8849ee4 100644
--- a/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html
+++ b/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html
@@ -281,7 +281,7 @@ Returns:
&quot;displayName&quot;: &quot;A String&quot;, # Display name to show to users.
&quot;entityTypes&quot;: [ # Entity types of the schema.
{ # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types.
- &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.
+ &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or a document). `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.
&quot;description&quot;: &quot;A String&quot;, # Description of the entity type.
&quot;enumValues&quot;: [ # If specified, lists all the possible values for this entity.
&quot;A String&quot;,
@@ -342,7 +342,7 @@ Returns:
&quot;displayName&quot;: &quot;A String&quot;, # Display name to show to users.
&quot;entityTypes&quot;: [ # Entity types of the schema.
{ # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types.
- &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.
+ &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or a document). `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.
&quot;description&quot;: &quot;A String&quot;, # Description of the entity type.
&quot;enumValues&quot;: [ # If specified, lists all the possible values for this entity.
&quot;A String&quot;,
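A sketch of inspecting the entity types documented above, including the new `duration` base type. The processor version name is a placeholder, and it is an assumption that the schema is exposed under a `documentSchema` field of the processor version resource.

    from googleapiclient.discovery import build

    service = build("documentai", "v1")
    name = ("projects/my-project/locations/us"
            "/processors/my-processor/processorVersions/my-version")

    version = service.projects().locations().processors().processorVersions().get(
        name=name
    ).execute()
    for entity_type in version.get("documentSchema", {}).get("entityTypes", []):
        print(entity_type.get("baseType"), "-", entity_type.get("description"))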
diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html
index 3a7811983..251c172be 100644
--- a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html
+++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html
@@ -290,7 +290,7 @@ Returns:
&quot;displayName&quot;: &quot;A String&quot;, # Display name to show to users.
&quot;entityTypes&quot;: [ # Entity types of the schema.
{ # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types.
- &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.
+ &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or a document). `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.
&quot;description&quot;: &quot;A String&quot;, # Description of the entity type.
&quot;enumValues&quot;: [ # If specified, lists all the possible values for this entity.
&quot;A String&quot;,
@@ -351,7 +351,7 @@ Returns:
&quot;displayName&quot;: &quot;A String&quot;, # Display name to show to users.
&quot;entityTypes&quot;: [ # Entity types of the schema.
{ # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types.
- &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.
+ &quot;baseType&quot;: &quot;A String&quot;, # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or a document). `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.
&quot;description&quot;: &quot;A String&quot;, # Description of the entity type.
&quot;enumValues&quot;: [ # If specified, lists all the possible values for this entity.
&quot;A String&quot;,
diff --git a/docs/dyn/drive_v2.files.html b/docs/dyn/drive_v2.files.html
index 0223f2edb..02529bd0f 100644
--- a/docs/dyn/drive_v2.files.html
+++ b/docs/dyn/drive_v2.files.html
@@ -112,7 +112,7 @@ h1, h2, h3 {
<p class="firstline">Retrieves the next page of results.</p>
<p class="toc_element">
<code><a href="#patch">patch(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None)</a></code></p>
-<p class="firstline">Updates file metadata and/or content. This method supports patch semantics.</p>
+<p class="firstline">Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might might change automatically, such as modifiedDate. This method supports patch semantics.</p>
<p class="toc_element">
<code><a href="#touch">touch(fileId, includePermissionsForView=None, supportsAllDrives=None, supportsTeamDrives=None)</a></code></p>
<p class="firstline">Set the file's updated time to the current server time.</p>
@@ -124,7 +124,7 @@ h1, h2, h3 {
<p class="firstline">Restores a file from the trash. The currently authenticated user must own the file or be at least a fileOrganizer on the parent for shared drive files. Only the owner may untrash a file.</p>
<p class="toc_element">
<code><a href="#update">update(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, media_body=None, media_mime_type=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None)</a></code></p>
-<p class="firstline">Updates file metadata and/or content.</p>
+<p class="firstline">Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might be changed automatically, such as modifiedDate. This method supports patch semantics.</p>
<p class="toc_element">
<code><a href="#watch">watch(fileId, acknowledgeAbuse=None, body=None, includePermissionsForView=None, projection=None, revisionId=None, supportsAllDrives=None, supportsTeamDrives=None, updateViewedDate=None)</a></code></p>
<p class="firstline">Subscribe to changes on a file</p>
@@ -2602,7 +2602,7 @@ Returns:
<div class="method">
<code class="details" id="patch">patch(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None)</code>
- <pre>Updates file metadata and/or content. This method supports patch semantics.
+ <pre>Updates a file&#x27;s metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. This method supports patch semantics.
Args:
fileId: string, The ID of the file to update. (required)
@@ -4552,7 +4552,7 @@ Returns:
<div class="method">
<code class="details" id="update">update(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, media_body=None, media_mime_type=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None)</code>
- <pre>Updates file metadata and/or content.
+ <pre>Updates a file&#x27;s metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might be changed automatically, such as modifiedDate. This method supports patch semantics.
Args:
fileId: string, The ID of the file to update. (required)
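A sketch of the patch semantics described above in Drive v2; the file ID is a placeholder, and only the populated fields are modified.

    from googleapiclient.discovery import build

    service = build("drive", "v2")

    updated = service.files().patch(
        fileId="placeholder-file-id",
        body={"title": "renamed.txt"},  # send only the fields that should change
    ).execute()
    print(updated.get("modifiedDate"))  # may have changed automatically on the server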
diff --git a/docs/dyn/drive_v3.files.html b/docs/dyn/drive_v3.files.html
index ea498eafb..0df0e8384 100644
--- a/docs/dyn/drive_v3.files.html
+++ b/docs/dyn/drive_v3.files.html
@@ -112,7 +112,7 @@ h1, h2, h3 {
<p class="firstline">Retrieves the next page of results.</p>
<p class="toc_element">
<code><a href="#update">update(fileId, addParents=None, body=None, enforceSingleParent=None, includePermissionsForView=None, keepRevisionForever=None, media_body=None, media_mime_type=None, ocrLanguage=None, removeParents=None, supportsAllDrives=None, supportsTeamDrives=None, useContentAsIndexableText=None)</a></code></p>
-<p class="firstline">Updates a file's metadata and/or content. This method supports patch semantics.</p>
+<p class="firstline">Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. This method supports patch semantics.</p>
<p class="toc_element">
<code><a href="#watch">watch(fileId, acknowledgeAbuse=None, body=None, includePermissionsForView=None, supportsAllDrives=None, supportsTeamDrives=None)</a></code></p>
<p class="firstline">Subscribes to changes to a file</p>
@@ -1879,7 +1879,7 @@ Returns:
<div class="method">
<code class="details" id="update">update(fileId, addParents=None, body=None, enforceSingleParent=None, includePermissionsForView=None, keepRevisionForever=None, media_body=None, media_mime_type=None, ocrLanguage=None, removeParents=None, supportsAllDrives=None, supportsTeamDrives=None, useContentAsIndexableText=None)</code>
- <pre>Updates a file&#x27;s metadata and/or content. This method supports patch semantics.
+ <pre>Updates a file&#x27;s metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedTime. This method supports patch semantics.
Args:
fileId: string, The ID of the file. (required)
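The same patch-style semantics in Drive v3, where update() carries the behavior; the file ID is again a placeholder.

    from googleapiclient.discovery import build

    service = build("drive", "v3")

    updated = service.files().update(
        fileId="placeholder-file-id",
        body={"name": "renamed.txt"},  # v3 uses `name` where v2 used `title`
    ).execute()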
diff --git a/docs/dyn/firestore_v1.projects.databases.html b/docs/dyn/firestore_v1.projects.databases.html
index 3e7fa3fc3..3ee210ed0 100644
--- a/docs/dyn/firestore_v1.projects.databases.html
+++ b/docs/dyn/firestore_v1.projects.databases.html
@@ -96,8 +96,17 @@ h1, h2, h3 {
<code><a href="#exportDocuments">exportDocuments(name, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Exports a copy of all or a subset of documents from Google Cloud Firestore to another storage system, such as Google Cloud Storage. Recent updates to documents may not be reflected in the export. The export occurs in the background and its progress can be monitored and managed via the Operation resource that is created. The output of an export may only be used once the associated operation is done. If an export operation is cancelled before completion it may leave partial data behind in Google Cloud Storage. For more details on export behavior and output format, refer to: https://cloud.google.com/firestore/docs/manage-data/export-import</p>
<p class="toc_element">
+ <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
+<p class="firstline">Gets information about a database.</p>
+<p class="toc_element">
<code><a href="#importDocuments">importDocuments(name, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Imports documents into Google Cloud Firestore. Existing documents with the same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore.</p>
+<p class="toc_element">
+ <code><a href="#list">list(parent, x__xgafv=None)</a></code></p>
+<p class="firstline">List all the databases in the project.</p>
+<p class="toc_element">
+ <code><a href="#patch">patch(name, body=None, updateMask=None, x__xgafv=None)</a></code></p>
+<p class="firstline">Updates a database.</p>
<h3>Method Details</h3>
<div class="method">
<code class="details" id="close">close()</code>
@@ -150,6 +159,29 @@ Returns:
</div>
<div class="method">
+ <code class="details" id="get">get(name, x__xgafv=None)</code>
+ <pre>Gets information about a database.
+
+Args:
+ name: string, Required. A name of the form `projects/{project_id}/databases/{database_id}` (required)
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+    { # A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named &#x27;(default)&#x27;.
+ &quot;concurrencyMode&quot;: &quot;A String&quot;, # The concurrency control mode to use for this database.
+ &quot;etag&quot;: &quot;A String&quot;, # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+ &quot;locationId&quot;: &quot;A String&quot;, # The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.
+ &quot;name&quot;: &quot;A String&quot;, # The resource name of the Database. Format: `projects/{project}/databases/{database}`
+ &quot;type&quot;: &quot;A String&quot;, # The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.
+}</pre>
+</div>
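A minimal sketch of calling the new databases.get method from Python might look like the following; the project ID is hypothetical, and `(default)` is the single per-project database named above.

```python
from googleapiclient.discovery import build

firestore = build('firestore', 'v1')

db = firestore.projects().databases().get(
    name='projects/my-project/databases/(default)'  # hypothetical project
).execute()

print(db['name'], db.get('locationId'), db.get('concurrencyMode'))
```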
+
+<div class="method">
<code class="details" id="importDocuments">importDocuments(name, body=None, x__xgafv=None)</code>
<pre>Imports documents into Google Cloud Firestore. Existing documents with the same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore.
@@ -194,4 +226,78 @@ Returns:
}</pre>
</div>
+<div class="method">
+ <code class="details" id="list">list(parent, x__xgafv=None)</code>
+  <pre>Lists all the databases in the project.
+
+Args:
+ parent: string, Required. A parent name of the form `projects/{project_id}` (required)
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # The list of databases for a project.
+ &quot;databases&quot;: [ # The databases in the project.
+    { # A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named &#x27;(default)&#x27;.
+ &quot;concurrencyMode&quot;: &quot;A String&quot;, # The concurrency control mode to use for this database.
+ &quot;etag&quot;: &quot;A String&quot;, # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+ &quot;locationId&quot;: &quot;A String&quot;, # The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.
+ &quot;name&quot;: &quot;A String&quot;, # The resource name of the Database. Format: `projects/{project}/databases/{database}`
+ &quot;type&quot;: &quot;A String&quot;, # The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.
+ },
+ ],
+}</pre>
+</div>
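A matching sketch for the new list method, again with a hypothetical project ID; the response wraps the results in the databases array shown above.

```python
from googleapiclient.discovery import build

firestore = build('firestore', 'v1')

resp = firestore.projects().databases().list(
    parent='projects/my-project'  # hypothetical parent
).execute()

for db in resp.get('databases', []):
    print(db['name'], db['type'])
```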
+
+<div class="method">
+ <code class="details" id="patch">patch(name, body=None, updateMask=None, x__xgafv=None)</code>
+ <pre>Updates a database.
+
+Args:
+ name: string, The resource name of the Database. Format: `projects/{project}/databases/{database}` (required)
+ body: object, The request body.
+ The object takes the form of:
+
+{ # A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named &#x27;(default)&#x27;.
+ &quot;concurrencyMode&quot;: &quot;A String&quot;, # The concurrency control mode to use for this database.
+ &quot;etag&quot;: &quot;A String&quot;, # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+ &quot;locationId&quot;: &quot;A String&quot;, # The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.
+ &quot;name&quot;: &quot;A String&quot;, # The resource name of the Database. Format: `projects/{project}/databases/{database}`
+ &quot;type&quot;: &quot;A String&quot;, # The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.
+}
+
+ updateMask: string, The list of fields to be updated.
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # This resource represents a long-running operation that is the result of a network API call.
+ &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+ &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+ &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
+ &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+ {
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+ },
+ &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+ &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+ &quot;response&quot;: { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+ &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+ },
+}</pre>
+</div>
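Finally, a sketch of the new patch method. updateMask limits which fields of the body are applied, and the call returns the long-running Operation documented above rather than the Database itself. The project ID and the chosen concurrencyMode value are hypothetical.

```python
from googleapiclient.discovery import build

firestore = build('firestore', 'v1')

op = firestore.projects().databases().patch(
    name='projects/my-project/databases/(default)',  # hypothetical
    updateMask='concurrencyMode',
    body={'concurrencyMode': 'OPTIMISTIC'},  # hypothetical enum value
).execute()

# Poll until done is True; on success, response carries the updated Database.
print(op.get('done'), op.get('name'))
```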
+
</body></html> \ No newline at end of file
diff --git a/docs/dyn/fitness_v1.users.dataset.html b/docs/dyn/fitness_v1.users.dataset.html
index ff83b6112..77dd48243 100644
--- a/docs/dyn/fitness_v1.users.dataset.html
+++ b/docs/dyn/fitness_v1.users.dataset.html
@@ -116,7 +116,7 @@ Args:
&quot;value&quot;: 42,
},
},
- &quot;endTimeMillis&quot;: &quot;A String&quot;, # The end of a window of time. Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive.
+      &quot;endTimeMillis&quot;: &quot;A String&quot;, # The end of a window of time. Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive. The maximum allowed difference between start_time_millis and end_time_millis is 7776000000 (roughly 90 days).
&quot;filteredDataQualityStandard&quot;: [ # DO NOT POPULATE THIS FIELD. It is ignored.
&quot;A String&quot;,
],
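The new sentence above documents a hard cap on the aggregation window: end_time_millis - start_time_millis must not exceed 7776000000 ms, which works out to exactly 90 days (90 × 24 × 60 × 60 × 1000). A minimal sketch that stays within the cap, with a hypothetical data type:

```python
import time

from googleapiclient.discovery import build

MAX_WINDOW_MS = 7_776_000_000  # 90 days * 24 h * 60 min * 60 s * 1000 ms

fitness = build('fitness', 'v1')

end_ms = int(time.time() * 1000)
start_ms = end_ms - MAX_WINDOW_MS  # any wider window would be rejected

resp = fitness.users().dataset().aggregate(
    userId='me',
    body={
        'aggregateBy': [{'dataTypeName': 'com.google.step_count.delta'}],
        'bucketByTime': {'durationMillis': 86400000},  # one-day buckets
        'startTimeMillis': start_ms,
        'endTimeMillis': end_ms,
    },
).execute()

print(len(resp.get('bucket', [])))
```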
diff --git a/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html b/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html
index 8c5d9e58d..1159eff2d 100644
--- a/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html
+++ b/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html
@@ -364,7 +364,7 @@ Returns:
&quot;in&quot;: [ # If one or more &#x27;in&#x27; clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
&quot;A String&quot;,
],
- &quot;logConfig&quot;: [ # The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.
+ &quot;logConfig&quot;: [ # The config returned to callers of CheckPolicy for any entries that match the LOG action.
{ # Specifies what kind of log the caller must write
&quot;cloudAudit&quot;: { # Write a Cloud Audit log # Cloud audit options.
&quot;authorizationLoggingOptions&quot;: { # Authorization-related information used by Cloud Audit Logging. # Information used by the Cloud Audit Logging pipeline.
@@ -668,7 +668,7 @@ Args:
&quot;in&quot;: [ # If one or more &#x27;in&#x27; clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
&quot;A String&quot;,
],
- &quot;logConfig&quot;: [ # The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.
+ &quot;logConfig&quot;: [ # The config returned to callers of CheckPolicy for any entries that match the LOG action.
{ # Specifies what kind of log the caller must write
&quot;cloudAudit&quot;: { # Write a Cloud Audit log # Cloud audit options.
&quot;authorizationLoggingOptions&quot;: { # Authorization-related information used by Cloud Audit Logging. # Information used by the Cloud Audit Logging pipeline.
@@ -765,7 +765,7 @@ Returns:
&quot;in&quot;: [ # If one or more &#x27;in&#x27; clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
&quot;A String&quot;,
],
- &quot;logConfig&quot;: [ # The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.
+ &quot;logConfig&quot;: [ # The config returned to callers of CheckPolicy for any entries that match the LOG action.
{ # Specifies what kind of log the caller must write
&quot;cloudAudit&quot;: { # Write a Cloud Audit log # Cloud audit options.
&quot;authorizationLoggingOptions&quot;: { # Authorization-related information used by Cloud Audit Logging. # Information used by the Cloud Audit Logging pipeline.
diff --git a/docs/dyn/gkehub_v1.projects.locations.features.html b/docs/dyn/gkehub_v1.projects.locations.features.html
index 4309d22df..89b39055e 100644
--- a/docs/dyn/gkehub_v1.projects.locations.features.html
+++ b/docs/dyn/gkehub_v1.projects.locations.features.html
@@ -132,13 +132,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
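The hunks above introduce three Config Sync additions: noSslVerify, syncDepth, and a per-container resourceRequirements override map. A hedged sketch of supplying them through features.patch follows; the membershipSpecs layout, project, membership name, and limit values are assumptions for illustration only.

```python
from googleapiclient.discovery import build

gkehub = build('gkehub', 'v1')

config_sync = {
    'git': {
        'syncRepo': 'https://example.com/config-repo.git',
        'syncBranch': 'main',
        'noSslVerify': False,  # keep certificate verification on
        'syncDepth': '1',      # shallow history for the git-sync container
    },
    'resourceRequirements': {  # keyed by container name
        'git-sync': {
            'containerName': 'git-sync',
            'memoryLimit': {'string': '800Mi'},  # hypothetical limit
        },
    },
}

op = gkehub.projects().locations().features().patch(
    name='projects/123/locations/global/features/configmanagement',
    updateMask='membershipSpecs',
    body={
        'membershipSpecs': {  # assumed per-membership spec layout
            'projects/123/locations/global/memberships/my-cluster': {
                'configmanagement': {'configSync': config_sync},
            },
        },
    },
).execute()

print(op.get('name'))
```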
@@ -166,6 +179,7 @@ Args:
      &quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of admission-webhook
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -200,6 +214,7 @@ Args:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission_webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -223,13 +238,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -389,13 +417,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -423,6 +464,7 @@ Returns:
      &quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of admission-webhook
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -457,6 +499,7 @@ Returns:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission_webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -480,13 +523,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -634,13 +690,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -668,6 +737,7 @@ Returns:
      &quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of admission-webhook
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -702,6 +772,7 @@ Returns:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission_webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -725,13 +796,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -835,13 +919,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -869,6 +966,7 @@ Args:
      &quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of admission-webhook
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -903,6 +1001,7 @@ Args:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission_webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -926,13 +1025,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
diff --git a/docs/dyn/gkehub_v1alpha.projects.locations.features.html b/docs/dyn/gkehub_v1alpha.projects.locations.features.html
index 1be2f78ec..a732ed280 100644
--- a/docs/dyn/gkehub_v1alpha.projects.locations.features.html
+++ b/docs/dyn/gkehub_v1alpha.projects.locations.features.html
@@ -135,13 +135,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -197,6 +210,7 @@ Args:
      &quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of admission-webhook
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -231,6 +245,7 @@ Args:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission_webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -257,13 +272,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -506,13 +534,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -568,6 +609,7 @@ Returns:
      &quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of admission-webhook
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -602,6 +644,7 @@ Returns:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission_webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -628,13 +671,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -865,13 +921,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+      &quot;noSslVerify&quot;: True or False, # Enable or disable SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
      &quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name
+    &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+      &quot;containerName&quot;: &quot;A String&quot;, # Name of the container
+      &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container
+        &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+      },
+      &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -927,6 +996,7 @@ Returns:
      &quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of the admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -961,6 +1031,7 @@ Returns:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -987,13 +1058,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -1180,13 +1264,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -1242,6 +1339,7 @@ Args:
&quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of the admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -1276,6 +1374,7 @@ Args:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -1302,13 +1401,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
diff --git a/docs/dyn/gkehub_v1beta.projects.locations.features.html b/docs/dyn/gkehub_v1beta.projects.locations.features.html
index 7fe99b66f..73fe64741 100644
--- a/docs/dyn/gkehub_v1beta.projects.locations.features.html
+++ b/docs/dyn/gkehub_v1beta.projects.locations.features.html
@@ -135,13 +135,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -196,6 +209,7 @@ Args:
&quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of the admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -230,6 +244,7 @@ Args:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -256,13 +271,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -456,13 +484,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -517,6 +558,7 @@ Returns:
&quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of the admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -551,6 +593,7 @@ Returns:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -577,13 +620,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -765,13 +821,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -826,6 +895,7 @@ Returns:
&quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of the admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -860,6 +930,7 @@ Returns:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -886,13 +957,26 @@ Returns:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -1030,13 +1114,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
@@ -1091,6 +1188,7 @@ Args:
&quot;clusterName&quot;: &quot;A String&quot;, # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership&#x27;s membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within an Anthos Config Management installation.
&quot;configSyncState&quot;: { # State information for ConfigSync # Current sync status
&quot;deploymentState&quot;: { # The state of ConfigSync&#x27;s deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Deployment state of the admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Deployment state of the git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Deployment state of the importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Deployment state of the monitor pod
@@ -1125,6 +1223,7 @@ Args:
&quot;syncToken&quot;: &quot;A String&quot;, # Token indicating the state of the syncer.
},
&quot;version&quot;: { # Specific versioning information pertaining to ConfigSync&#x27;s Pods # The version of ConfigSync deployed
+ &quot;admissionWebhook&quot;: &quot;A String&quot;, # Version of the deployed admission-webhook pod
&quot;gitSync&quot;: &quot;A String&quot;, # Version of the deployed git-sync pod
&quot;importer&quot;: &quot;A String&quot;, # Version of the deployed importer pod
&quot;monitor&quot;: &quot;A String&quot;, # Version of the deployed monitor pod
@@ -1151,13 +1250,26 @@ Args:
&quot;git&quot;: { # Git repo configuration for a single cluster. # Git repo configuration for the cluster.
&quot;gcpServiceAccountEmail&quot;: &quot;A String&quot;, # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount.
&quot;httpsProxy&quot;: &quot;A String&quot;, # URL for the HTTPS proxy to be used when communicating with the Git repo.
+ &quot;noSslVerify&quot;: True or False, # Enables or disables SSL certificate verification. Default: false.
&quot;policyDir&quot;: &quot;A String&quot;, # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.
&quot;secretType&quot;: &quot;A String&quot;, # Type of secret configured for access to the Git repo.
&quot;syncBranch&quot;: &quot;A String&quot;, # The branch of the repository to sync from. Default: master.
+ &quot;syncDepth&quot;: &quot;A String&quot;, # The depth of git commits synced by the git-sync container.
&quot;syncRepo&quot;: &quot;A String&quot;, # The URL of the Git repository to use as the source of truth.
&quot;syncRev&quot;: &quot;A String&quot;, # Git revision (tag or hash) to check out. Default: HEAD.
&quot;syncWaitSecs&quot;: &quot;A String&quot;, # Period in seconds between consecutive syncs. Default: 15.
},
+ &quot;resourceRequirements&quot;: { # Specifies CPU and memory limits for containers, keyed by container name.
+ &quot;a_key&quot;: { # ResourceRequirements allows overriding the CPU and memory resource requirements of a container.
+ &quot;containerName&quot;: &quot;A String&quot;, # Name of the container.
+ &quot;cpuLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the CPU limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ &quot;memoryLimit&quot;: { # The view model of a single quantity, e.g. &quot;800 MiB&quot;. Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows overriding the memory limit of a container.
+ &quot;string&quot;: &quot;A String&quot;, # Stringified version of the quantity, e.g., &quot;800 MiB&quot;.
+ },
+ },
+ },
&quot;sourceFormat&quot;: &quot;A String&quot;, # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode.
},
&quot;hierarchyController&quot;: { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster.
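The resourceRequirements map added to both gkehub surfaces above lets a caller cap CPU and memory for individual Config Sync containers, keyed by container name, next to the existing git settings. Below is a minimal sketch of setting it through the generated Python client; it assumes the field lives inside the configSync block of a membership spec, and the project, membership, repository, and quantity values are placeholders, not values from this commit.

from googleapiclient.discovery import build

# Build the GKE Hub client from its discovery document (credentials via ADC).
gkehub = build("gkehub", "v1beta")

# Hypothetical names; substitute a real project and membership.
feature = "projects/my-project/locations/global/features/configmanagement"
membership = "projects/my-project/locations/global/memberships/my-cluster"

body = {
    "membershipSpecs": {
        membership: {
            "configmanagement": {
                "configSync": {
                    "git": {
                        "syncRepo": "https://github.com/example/config-repo",
                        "syncBranch": "main",
                        "secretType": "none",
                        "noSslVerify": False,  # keep SSL verification on (the default)
                        "syncDepth": "1",      # shallow-sync only the latest commit
                    },
                    # New in this artifact: per-container CPU/memory limit
                    # overrides, keyed by container name.
                    "resourceRequirements": {
                        "git-sync": {
                            "containerName": "git-sync",
                            "cpuLimit": {"string": "500m"},
                            "memoryLimit": {"string": "256Mi"},
                        }
                    },
                    "sourceFormat": "unstructured",
                }
            }
        }
    }
}

# Patch only the membership specs of the existing feature.
gkehub.projects().locations().features().patch(
    name=feature, updateMask="membershipSpecs", body=body
).execute()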
diff --git a/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html b/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html
index 4c7c1ef8d..01de30721 100644
--- a/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html
+++ b/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html
@@ -111,7 +111,7 @@ Args:
The object takes the form of:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -133,7 +133,7 @@ Returns:
An object of the form:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -179,7 +179,7 @@ Returns:
An object of the form:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -212,7 +212,7 @@ Returns:
{
&quot;attributeDefinitions&quot;: [ # The returned Attribute definitions. The maximum number of attributes returned is determined by the value of page_size in the ListAttributeDefinitionsRequest.
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -252,7 +252,7 @@ Args:
The object takes the form of:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -274,7 +274,7 @@ Returns:
An object of the form:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
diff --git a/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html b/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html
index 097a5c5f0..2dab70a33 100644
--- a/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html
+++ b/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html
@@ -111,7 +111,7 @@ Args:
The object takes the form of:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -133,7 +133,7 @@ Returns:
An object of the form:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -179,7 +179,7 @@ Returns:
An object of the form:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -212,7 +212,7 @@ Returns:
{
&quot;attributeDefinitions&quot;: [ # The returned Attribute definitions. The maximum number of attributes returned is determined by the value of page_size in the ListAttributeDefinitionsRequest.
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -252,7 +252,7 @@ Args:
The object takes the form of:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
@@ -274,7 +274,7 @@ Returns:
An object of the form:
{ # A client-defined consent attribute.
- &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.
+ &quot;allowedValues&quot;: [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.
&quot;A String&quot;,
],
&quot;category&quot;: &quot;A String&quot;, # Required. The category of the attribute. The value of this field cannot be changed after creation.
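Both healthcare consent surfaces above raise the allowedValues ceiling from 100 to 500 per attribute definition. A short creation sketch against the v1 surface follows; the project, dataset, store, attribute ID, and values are placeholder assumptions, and only the create signature documented above is used.

from googleapiclient.discovery import build

healthcare = build("healthcare", "v1")

# Hypothetical resource path; substitute real project/dataset/store IDs.
parent = ("projects/my-project/locations/us-central1/"
          "datasets/my-dataset/consentStores/my-store")

attribute = {
    "category": "RESOURCE",  # cannot be changed after creation
    "description": "Sensitivity level attached to stored resources.",
    # Required and non-empty; the list may only grow after creation,
    # now up to 500 values.
    "allowedValues": ["LOW", "MEDIUM", "HIGH"],
}

healthcare.projects().locations().datasets().consentStores() \
    .attributeDefinitions().create(
        parent=parent,
        attributeDefinitionId="data_sensitivity",
        body=attribute,
    ).execute()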
diff --git a/docs/dyn/iam_v1.projects.serviceAccounts.keys.html b/docs/dyn/iam_v1.projects.serviceAccounts.keys.html
index dc5ba6aac..f5549d562 100644
--- a/docs/dyn/iam_v1.projects.serviceAccounts.keys.html
+++ b/docs/dyn/iam_v1.projects.serviceAccounts.keys.html
@@ -85,10 +85,10 @@ h1, h2, h3 {
<p class="firstline">Deletes a ServiceAccountKey. Deleting a service account key does not revoke short-lived credentials that have been issued based on the service account key.</p>
<p class="toc_element">
<code><a href="#disable">disable(name, body=None, x__xgafv=None)</a></code></p>
-<p class="firstline">Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey. The API is currently in preview phase.</p>
+<p class="firstline">Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey.</p>
<p class="toc_element">
<code><a href="#enable">enable(name, body=None, x__xgafv=None)</a></code></p>
-<p class="firstline">Enable a ServiceAccountKey. The API is currently in preview phase.</p>
+<p class="firstline">Enable a ServiceAccountKey.</p>
<p class="toc_element">
<code><a href="#get">get(name, publicKeyType=None, x__xgafv=None)</a></code></p>
<p class="firstline">Gets a ServiceAccountKey.</p>
@@ -160,7 +160,7 @@ Returns:
<div class="method">
<code class="details" id="disable">disable(name, body=None, x__xgafv=None)</code>
- <pre>Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey. The API is currently in preview phase.
+ <pre>Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey.
Args:
name: string, Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account. (required)
@@ -184,7 +184,7 @@ Returns:
<div class="method">
<code class="details" id="enable">enable(name, body=None, x__xgafv=None)</code>
- <pre>Enable a ServiceAccountKey. The API is currently in preview phase.
+ <pre>Enable a ServiceAccountKey.
Args:
name: string, Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account. (required)
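With the preview caveat dropped, disable and enable are ordinary methods on the keys collection. A minimal sketch of toggling a key is below; the service account email and key ID are placeholders, and `-` stands in for the project ID exactly as the docs above describe.

from googleapiclient.discovery import build

iam = build("iam", "v1")

# `-` lets the project be inferred from the service account email;
# the key ID here is hypothetical.
key_name = ("projects/-/serviceAccounts/"
            "my-sa@my-project.iam.gserviceaccount.com/keys/0123456789abcdef")

keys = iam.projects().serviceAccounts().keys()
keys.disable(name=key_name, body={}).execute()  # key stops authenticating callers
keys.enable(name=key_name, body={}).execute()   # key is usable again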
diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html
index e0ae829d1..5c51907e5 100644
--- a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html
+++ b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html
@@ -148,7 +148,7 @@ Args:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes, in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of the week when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -311,7 +311,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes, in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of the week when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -462,7 +462,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes, in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of the week when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.html b/docs/dyn/metastore_v1alpha.projects.locations.services.html
index 1f9a46ad2..6413ef83e 100644
--- a/docs/dyn/metastore_v1alpha.projects.locations.services.html
+++ b/docs/dyn/metastore_v1alpha.projects.locations.services.html
@@ -164,7 +164,7 @@ Args:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes, in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of the week when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -361,7 +361,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes, in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of the week when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -503,7 +503,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -604,7 +604,7 @@ Args:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html
index 5ee8ebdec..eaa2d95f4 100644
--- a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html
+++ b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html
@@ -148,7 +148,7 @@ Args:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -311,7 +311,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -462,7 +462,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.html b/docs/dyn/metastore_v1beta.projects.locations.services.html
index e9b52e2fe..77dd517cb 100644
--- a/docs/dyn/metastore_v1beta.projects.locations.services.html
+++ b/docs/dyn/metastore_v1beta.projects.locations.services.html
@@ -159,7 +159,7 @@ Args:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -356,7 +356,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -498,7 +498,7 @@ Returns:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
@@ -599,7 +599,7 @@ Args:
&quot;labels&quot;: { # User-defined labels for the metastore service.
&quot;a_key&quot;: &quot;A String&quot;,
},
- &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time.
+ &quot;maintenanceWindow&quot;: { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operations on the service. # The one-hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. A maintenance window is not needed for services with the SPANNER database type.
&quot;dayOfWeek&quot;: &quot;A String&quot;, # The day of week, when the window starts.
&quot;hourOfDay&quot;: 42, # The hour of day (0-23) when the window starts.
},
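
Editor's note: all four metastore doc files above gain the same note that a maintenance window is not needed for SPANNER-backed services. A hedged sketch of acting on that note at service-creation time; the databaseType field and its MYSQL/SPANNER values are assumptions based on the note, as are all IDs:

    from googleapiclient import discovery

    metastore = discovery.build("metastore", "v1beta")
    parent = "projects/my-project/locations/us-central1"

    def service_body(database_type):
        body = {"databaseType": database_type}
        if database_type != "SPANNER":
            # Per the note added above, SPANNER-backed services skip the window.
            body["maintenanceWindow"] = {"dayOfWeek": "SUNDAY", "hourOfDay": 2}
        return body

    operation = metastore.projects().locations().services().create(
        parent=parent, serviceId="my-service", body=service_body("SPANNER")
    ).execute()
    print(operation["name"])
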
diff --git a/docs/dyn/monitoring_v1.projects.dashboards.html b/docs/dyn/monitoring_v1.projects.dashboards.html
index f86699d6e..1a23efd91 100644
--- a/docs/dyn/monitoring_v1.projects.dashboards.html
+++ b/docs/dyn/monitoring_v1.projects.dashboards.html
@@ -218,6 +218,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
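
Editor's note: the hunk above adds the new timeSeriesTable widget to the dashboard schema. A minimal sketch of creating a dashboard that uses it, with only the fields the docs mark Required plus a basic aggregation; the metric filter, project ID, and gridLayout choice are placeholder assumptions:

    from googleapiclient import discovery

    monitoring = discovery.build("monitoring", "v1")

    dashboard = {
        "displayName": "CPU table (sketch)",
        "gridLayout": {
            "widgets": [{
                "title": "CPU utilization by instance",
                "timeSeriesTable": {
                    # Required: at least one data set with a time series query.
                    "dataSets": [{
                        "timeSeriesQuery": {
                            "timeSeriesFilter": {
                                "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                "aggregation": {
                                    "alignmentPeriod": "60s",
                                    "perSeriesAligner": "ALIGN_MEAN",
                                },
                            },
                        },
                        # Optional row-naming template, per tableTemplate above.
                        "tableTemplate": "${resource.labels.instance_id}",
                    }],
                },
            }],
        },
    }

    created = monitoring.projects().dashboards().create(
        parent="projects/my-project", body=dashboard
    ).execute()
    print(created["name"])
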
@@ -439,6 +525,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
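The timeSeriesTable schema above is verbose but mechanical. As a hedged illustration (not part of the generated docs), a minimal widget body matching these fields might be assembled in Python as follows; the metric type, enum values, and labels are assumptions chosen for the example:

    # Illustrative sketch only: key names follow the timeSeriesTable schema
    # documented above; the metric and enum values are example assumptions.
    table_widget = {
        "title": "CPU usage by zone",
        "timeSeriesTable": {
            "dataSets": [
                {
                    # Data assumed to be published once per minute, so "60s"
                    # is the finest alignment period worth requesting.
                    "minAlignmentPeriod": "60s",
                    # Name each output series after its zone label.
                    "tableTemplate": "${resource.labels.zone}",
                    "timeSeriesQuery": {
                        "timeSeriesFilter": {
                            # Required. Monitoring filter selecting the metric.
                            "filter": 'metric.type="compute.googleapis.com/instance/cpu/usage_time"',
                            "aggregation": {
                                "alignmentPeriod": "60s",
                                "perSeriesAligner": "ALIGN_RATE",
                                "crossSeriesReducer": "REDUCE_SUM",
                                # resource.type is implicitly preserved.
                                "groupByFields": ["resource.labels.zone"],
                            },
                        },
                        "unitOverride": "s",
                    },
                }
            ],
        },
    }

Note how the documented constraint holds in the sketch: because crossSeriesReducer is set, perSeriesAligner is not ALIGN_NONE and alignmentPeriod is supplied.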
@@ -658,6 +830,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
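For the ratio variant, a similarly hedged sketch: a timeSeriesFilterRatio query wrapped in a dashboards create call through this client library. The project ID, filters, and layout are assumptions for illustration; only the field names come from the schema above.

    # Illustrative sketch only: an error-ratio table widget submitted via the
    # Cloud Monitoring dashboards API. Filters and names are assumptions.
    from googleapiclient import discovery

    service = discovery.build("monitoring", "v1")

    ratio_widget = {
        "title": "5xx error ratio",
        "timeSeriesTable": {
            "dataSets": [{
                "timeSeriesQuery": {
                    "timeSeriesFilterRatio": {
                        "numerator": {
                            "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"',
                            "aggregation": {"alignmentPeriod": "300s",
                                            "perSeriesAligner": "ALIGN_RATE"},
                        },
                        "denominator": {
                            "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',
                            "aggregation": {"alignmentPeriod": "300s",
                                            "perSeriesAligner": "ALIGN_RATE"},
                        },
                        # Aggregate again after the pair-wise division; reducing
                        # requires an aligner and a period, per the docs above.
                        "secondaryAggregation": {"alignmentPeriod": "300s",
                                                 "perSeriesAligner": "ALIGN_MEAN",
                                                 "crossSeriesReducer": "REDUCE_MEAN"},
                    },
                },
            }],
        },
    }

    dashboard = {
        "displayName": "Service health (example)",
        "gridLayout": {"columns": "1", "widgets": [ratio_widget]},
    }

    created = service.projects().dashboards().create(
        parent="projects/my-project", body=dashboard).execute()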
@@ -882,6 +1140,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
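The timeSeriesTable schema above is easier to read next to a concrete request. Below is a minimal sketch using this library's monitoring v1 surface, assuming application-default credentials; the project ID, dashboard name, and metric type are illustrative placeholders, and the timeSeriesTable field itself requires the updated monitoring discovery artifact from this change.

from googleapiclient.discovery import build

# Cloud Monitoring dashboards are served by the v1 surface of the monitoring API.
service = build("monitoring", "v1")

# One timeSeriesTable widget: each dataSets entry pairs a timeSeriesQuery
# (here a filter plus an aggregation) with optional table display settings.
dashboard = {
    "displayName": "CPU usage table",  # hypothetical dashboard name
    "gridLayout": {
        "widgets": [
            {
                "title": "CPU usage by instance",
                "timeSeriesTable": {
                    "dataSets": [
                        {
                            # Matches the doc above: at least the metric's publish interval.
                            "minAlignmentPeriod": "600s",
                            "tableTemplate": "${resource.labels.instance_id}",
                            "timeSeriesQuery": {
                                "timeSeriesFilter": {
                                    "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                    "aggregation": {
                                        "alignmentPeriod": "600s",
                                        "perSeriesAligner": "ALIGN_MEAN",
                                    },
                                },
                            },
                        }
                    ]
                },
            }
        ]
    },
}

created = (
    service.projects()
    .dashboards()
    .create(parent="projects/my-project", body=dashboard)  # hypothetical project
    .execute()
)
print(created["name"])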
@@ -1115,6 +1459,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the primary aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
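For the timeSeriesFilterRatio variant above, the numerator and denominator are each aligned independently, divided pair-wise, and the optional secondaryAggregation is applied to the resulting ratio series. A standalone sketch of such a query body follows; the metric type and label names are hypothetical, and the constraints in the comments restate the field documentation above.

import json

# Both sides use ALIGN_RATE so the division compares rates over the same 300s
# alignment periods. Per the schema above, a cross_series_reducer requires a
# per_series_aligner other than ALIGN_NONE plus an alignment_period.
ratio_query = {
    "timeSeriesFilterRatio": {
        "numerator": {
            "filter": 'metric.type="example.com/request_count" metric.labels.status="error"',
            "aggregation": {"alignmentPeriod": "300s", "perSeriesAligner": "ALIGN_RATE"},
        },
        "denominator": {
            "filter": 'metric.type="example.com/request_count"',
            "aggregation": {"alignmentPeriod": "300s", "perSeriesAligner": "ALIGN_RATE"},
        },
        # Average the error ratio per zone after the ratio is computed.
        "secondaryAggregation": {
            "alignmentPeriod": "300s",
            "perSeriesAligner": "ALIGN_MEAN",
            "crossSeriesReducer": "REDUCE_MEAN",
            "groupByFields": ["resource.labels.zone"],
        },
    },
    "unitOverride": "1",  # a ratio of equal units is dimensionless
}

print(json.dumps(ratio_query, indent=2))

This dict is the value of a timeSeriesQuery field and can be dropped into any widget that accepts one, such as the timeSeriesTable dataSets entries shown in this hunk.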
@@ -1336,6 +1766,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
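The aggregation blocks above all repeat the same cross-field invariants: if cross_series_reducer is set, per_series_aligner must also be set to something other than ALIGN_NONE, and alignment_period (at least 60 seconds, at most 104 weeks) must be set. A minimal Python sketch of an aggregation value obeying those rules follows; the enum values ALIGN_MEAN and REDUCE_MEAN, the "60s" Duration encoding, and the check_aggregation helper are illustrative assumptions, not part of this change.

    # Illustrative aggregation message for a timeSeriesFilter, expressed as the
    # plain dict this client library sends. Values are assumptions based on the
    # field documentation above.
    aggregation = {
        "alignmentPeriod": "60s",            # >= 60s; proto Duration JSON encoding
        "perSeriesAligner": "ALIGN_MEAN",    # required (and != ALIGN_NONE) because a reducer is set
        "crossSeriesReducer": "REDUCE_MEAN",
        "groupByFields": ["resource.labels.zone"],  # resource.type is implicitly included
    }

    def check_aggregation(agg):
        # Hypothetical client-side check mirroring the documented server-side rules.
        if agg.get("crossSeriesReducer"):
            aligner = agg.get("perSeriesAligner")
            if not aligner or aligner == "ALIGN_NONE":
                raise ValueError("cross_series_reducer requires per_series_aligner != ALIGN_NONE")
            if not agg.get("alignmentPeriod"):
                raise ValueError("cross_series_reducer requires alignment_period")

    check_aggregation(aggregation)  # passes for the dict above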
@@ -1555,6 +2071,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, for example &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
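To make the aggregation and timeSeriesTable schema above concrete, here is a minimal Python sketch of assembling a matching widget body. The metric filter, alignment values, and grouping field are illustrative assumptions, not values taken from the discovery document.

    # A hypothetical timeSeriesTable widget body matching the schema above.
    # All concrete values (filter, periods, fields) are illustrative assumptions.
    aggregation = {
        "alignmentPeriod": "300s",           # at least "60s"; a Duration-style string
        "perSeriesAligner": "ALIGN_RATE",    # per-series transformation applied per period
        "crossSeriesReducer": "REDUCE_SUM",  # combine the aligned series into one per group
        "groupByFields": ["resource.labels.zone"],  # assumed grouping field to preserve
    }

    widget = {
        "title": "CPU usage by zone",
        "timeSeriesTable": {
            "dataSets": [
                {
                    "minAlignmentPeriod": "300s",
                    "tableTemplate": "${resource.labels.project_id}",
                    "timeSeriesQuery": {
                        "timeSeriesFilter": {
                            "filter": 'metric.type="compute.googleapis.com/instance/cpu/usage_time"',
                            "aggregation": aggregation,
                        },
                        "unitOverride": "s",
                    },
                },
            ],
        },
    }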
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -1779,6 +2381,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the initial aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
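The generated methods documented in this file accept such a widget inside a dashboard layout. The following usage sketch assumes Application Default Credentials, a hypothetical project ID, and a gridLayout shape; verify both against the method documentation above before relying on them.

    # Hypothetical usage: create a dashboard containing the widget sketched earlier.
    from googleapiclient.discovery import build

    service = build("monitoring", "v1")  # the dashboards surface of the Monitoring API
    dashboard = {
        "displayName": "Example dashboard",  # assumed name
        "gridLayout": {"columns": "2", "widgets": [widget]},  # 'widget' from the earlier sketch
    }
    created = (
        service.projects()
        .dashboards()
        .create(parent="projects/my-project", body=dashboard)  # assumed project ID
        .execute()
    )
    print(created["name"])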
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -2036,6 +2724,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the initial aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries; then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction optionally combines the aligned and transformed time series, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95th percentile latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
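A minimal sketch (not part of this diff) of how the aggregation fields above compose: a hypothetical dataSets entry that selects a metric with a monitoring filter, aligns each series into 60-second buckets, then reduces across series by zone. The metric type, the &quot;60s&quot; duration format, and the label name are illustrative assumptions rather than values taken from this page.

    # Hypothetical dataSets entry for a timeSeriesTable widget.
    data_set = {
        "timeSeriesQuery": {
            "timeSeriesFilter": {
                # Required: monitoring filter that selects metric types and resources.
                "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                "aggregation": {
                    "alignmentPeriod": "60s",          # at least 60 seconds, at most 104 weeks
                    "perSeriesAligner": "ALIGN_MEAN",  # must not be ALIGN_NONE when reducing
                    "crossSeriesReducer": "REDUCE_MEAN",
                    "groupByFields": ["resource.labels.zone"],  # subsets the reducer preserves
                },
            },
        },
    }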
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -2257,6 +3031,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
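By contrast with the chart and table widgets, the text widget shown above needs only its two documented fields. A small sketch, assuming MARKDOWN is an accepted format value:

    # Hypothetical text widget; "title" is the generic widget title field.
    text_widget = {
        "title": "Notes",
        "text": {
            "content": "## Service overview\nSee the latency table below.",
            "format": "MARKDOWN",  # assumed enum value
        },
    }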
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
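The timeSeriesFilterRatio fields documented above support the common error-ratio pattern: the numerator selects failing requests, the denominator selects all requests, and a pickTimeSeriesFilter keeps only the worst series after the ratio is computed. A sketch under assumed metric and label names (the load-balancer request-count metric and its response_code_class label are illustrative):

    # Hypothetical ratio query: fraction of 5xx responses, worst 3 series kept.
    ratio_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                "filter": ('metric.type="loadbalancing.googleapis.com/https/request_count" '
                           'metric.labels.response_code_class="500"'),
                "aggregation": {"alignmentPeriod": "300s", "perSeriesAligner": "ALIGN_RATE"},
            },
            "denominator": {
                "filter": 'metric.type="loadbalancing.googleapis.com/https/request_count"',
                "aggregation": {"alignmentPeriod": "300s", "perSeriesAligner": "ALIGN_RATE"},
            },
            # Keep the 3 series with the highest mean ratio.
            "pickTimeSeriesFilter": {
                "rankingMethod": "METHOD_MEAN",
                "direction": "TOP",
                "numTimeSeries": 3,
            },
        },
    }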
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
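Putting the pieces together, the new timeSeriesTable widget is supplied like any other widget in a dashboard body. A sketch against the dashboards service built with this library; the project name, display names, and the data_set variable from the earlier sketch are placeholders:

    from googleapiclient import discovery

    # The projects.dashboards methods documented on this page live in monitoring v1.
    service = discovery.build("monitoring", "v1")

    dashboard_body = {
        "displayName": "CPU overview",
        "gridLayout": {
            "widgets": [
                {
                    "title": "CPU by zone",
                    "timeSeriesTable": {"dataSets": [data_set]},
                },
            ],
        },
    }

    request = service.projects().dashboards().create(
        parent="projects/my-project", body=dashboard_body)
    response = request.execute()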
@@ -2476,6 +3336,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, must not be ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is the step in which the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This is done before the per-series aligner is applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
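Editor's note: the hunk above documents the timeSeriesFilterRatio portion of a widget's timeSeriesQuery. For orientation, here is a minimal Python sketch of how that structure might be assembled; the metric type, label, group-by field, and period choices are illustrative assumptions, not values taken from this patch.

# A sketch of a TimeSeriesFilterRatio body: 5xx responses divided by all
# responses, each side aligned to a per-minute rate and summed per service.
# The filter strings and aligner/reducer choices below are assumptions.
ratio_query = {
    "timeSeriesFilterRatio": {
        "numerator": {
            "filter": (
                'metric.type="serviceruntime.googleapis.com/api/request_count" '
                'metric.labels.response_code_class="5xx"'
            ),
            "aggregation": {
                "alignmentPeriod": "60s",          # must be at least 60 seconds
                "perSeriesAligner": "ALIGN_RATE",  # required when a reducer is set
                "crossSeriesReducer": "REDUCE_SUM",
                "groupByFields": ["resource.labels.service"],
            },
        },
        "denominator": {
            "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',
            "aggregation": {
                "alignmentPeriod": "60s",
                "perSeriesAligner": "ALIGN_RATE",
                "crossSeriesReducer": "REDUCE_SUM",
                "groupByFields": ["resource.labels.service"],
            },
        },
        # Optionally smooth the computed ratio with a second aggregation.
        "secondaryAggregation": {
            "alignmentPeriod": "300s",
            "perSeriesAligner": "ALIGN_MEAN",
        },
    },
    "unitOverride": "1",  # the ratio is dimensionless
}

Per the field descriptions above, whenever a crossSeriesReducer is set, the matching perSeriesAligner (not ALIGN_NONE) and alignmentPeriod must be set as well, or the request is rejected.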
@@ -2700,6 +3646,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics-based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
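Editor's note: to make the timeSeriesTable schema documented above concrete, here is a hedged Python sketch of creating a dashboard containing one such table with this library. The project ID, display name, layout, and metric filter are illustrative assumptions; the dashboards.create call and field names follow the monitoring v1 surface documented in this file, and Application Default Credentials are assumed to be configured.

from googleapiclient.discovery import build

# Dashboards live in the Cloud Monitoring v1 surface (metrics are v3).
service = build("monitoring", "v1")

dashboard = {
    "displayName": "Instance CPU table",  # assumed name
    "gridLayout": {
        "widgets": [
            {
                "title": "Mean CPU utilization by instance",
                "timeSeriesTable": {
                    "dataSets": [
                        {
                            # Name each table row after its instance label.
                            "tableTemplate": "${resource.labels.instance_id}",
                            "minAlignmentPeriod": "600s",
                            "timeSeriesQuery": {
                                "timeSeriesFilter": {
                                    "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                    "aggregation": {
                                        "alignmentPeriod": "600s",
                                        "perSeriesAligner": "ALIGN_MEAN",
                                    },
                                },
                            },
                        }
                    ]
                },
            }
        ]
    },
}

created = (
    service.projects()
    .dashboards()
    .create(parent="projects/my-project-id", body=dashboard)  # assumed project
    .execute()
)
print(created["name"])  # projects/.../dashboards/<server-assigned id>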
@@ -2943,6 +3975,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required, or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
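+ # As an illustration (not part of the response schema), a timeSeriesFilter whose
+ # aggregation converts each series to a per-minute rate and then averages the
+ # aligned series per zone could be written as the dict below. The metric type
+ # and label are placeholders; the enum values and the &quot;60s&quot; duration format are real:
+ #   time_series_filter = {
+ #       &quot;filter&quot;: &#x27;metric.type = &quot;compute.googleapis.com/instance/cpu/usage_time&quot;&#x27;,
+ #       &quot;aggregation&quot;: {
+ #           &quot;alignmentPeriod&quot;: &quot;60s&quot;,
+ #           &quot;perSeriesAligner&quot;: &quot;ALIGN_RATE&quot;,
+ #           &quot;crossSeriesReducer&quot;: &quot;REDUCE_MEAN&quot;,
+ #           &quot;groupByFields&quot;: [&quot;resource.labels.zone&quot;],  # illustrative grouping label
+ #       },
+ #   }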
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
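+ # The worked example above, expressed as a dict (the enum values come straight
+ # from the field descriptions; only the variable name is invented here):
+ #   pick_time_series_filter = {
+ #       &quot;rankingMethod&quot;: &quot;METHOD_MEAN&quot;,
+ #       &quot;direction&quot;: &quot;BOTTOM&quot;,
+ #       &quot;numTimeSeries&quot;: 3,
+ #   }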
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
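+ # For example (a sketch only; the metric type and label are placeholders), a
+ # ratio of 5xx responses to all responses can be phrased as a numerator and
+ # denominator that share one metric, with the numerator narrowed by a label match:
+ #   error_ratio = {
+ #       &quot;numerator&quot;: {
+ #           &quot;filter&quot;: &#x27;metric.type = &quot;appengine.googleapis.com/http/server/response_count&quot; AND metric.labels.response_code &gt;= 500&#x27;,
+ #           &quot;aggregation&quot;: {&quot;alignmentPeriod&quot;: &quot;60s&quot;, &quot;perSeriesAligner&quot;: &quot;ALIGN_DELTA&quot;},
+ #       },
+ #       &quot;denominator&quot;: {
+ #           &quot;filter&quot;: &#x27;metric.type = &quot;appengine.googleapis.com/http/server/response_count&quot;&#x27;,
+ #           &quot;aggregation&quot;: {&quot;alignmentPeriod&quot;: &quot;60s&quot;, &quot;perSeriesAligner&quot;: &quot;ALIGN_DELTA&quot;},
+ #       },
+ #   }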
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
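+ # Putting the pieces together: creating a dashboard that carries one such table
+ # widget might look roughly like the sketch below, reusing the time_series_filter
+ # dict sketched earlier. The project ID, titles, and layout are placeholders; the
+ # client is the standard googleapiclient.discovery build() for the monitoring v1 surface:
+ #   from googleapiclient.discovery import build
+ #
+ #   service = build(&quot;monitoring&quot;, &quot;v1&quot;)
+ #   table_widget = {
+ #       &quot;title&quot;: &quot;CPU rate by zone&quot;,
+ #       &quot;timeSeriesTable&quot;: {&quot;dataSets&quot;: [{&quot;timeSeriesQuery&quot;: {&quot;timeSeriesFilter&quot;: time_series_filter}}]},
+ #   }
+ #   dashboard = {
+ #       &quot;displayName&quot;: &quot;Example dashboard&quot;,
+ #       &quot;gridLayout&quot;: {&quot;columns&quot;: &quot;1&quot;, &quot;widgets&quot;: [table_widget]},
+ #   }
+ #   created = service.projects().dashboards().create(
+ #       parent=&quot;projects/my-project&quot;, body=dashboard).execute()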
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -3164,6 +4282,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
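+ # Adding such a widget to an existing dashboard goes through patch(); a sketch,
+ # assuming the dashboard uses a gridLayout and was first fetched with get() so
+ # its current etag travels back in the body (the names are placeholders and
+ # table_widget is the dict sketched earlier):
+ #   name = &quot;projects/my-project/dashboards/my-dashboard-id&quot;
+ #   current = service.projects().dashboards().get(name=name).execute()
+ #   current[&quot;gridLayout&quot;][&quot;widgets&quot;].append(table_widget)
+ #   service.projects().dashboards().patch(name=name, body=current).execute()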
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -3383,6 +4587,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
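The aggregation, pick-filter, and ratio structures documented above are plain request-body dictionaries, so a widget's time series query can be assembled directly in Python before being embedded in a dashboard. A minimal sketch follows; the metric type and the response_code_class label are illustrative placeholders rather than values defined by this API:

    # Ratio query: rate of 5xx responses divided by the rate of all
    # responses, aligned into 5-minute buckets and summed across series.
    shared_aggregation = {
        "alignmentPeriod": "300s",           # Duration string; must be at least "60s"
        "perSeriesAligner": "ALIGN_RATE",    # turn cumulative counts into rates
        "crossSeriesReducer": "REDUCE_SUM",  # combine the aligned series
    }

    time_series_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                # The response_code_class label is illustrative.
                "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"',
                "aggregation": shared_aggregation,
            },
            "denominator": {
                "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',
                "aggregation": shared_aggregation,
            },
            # Keep only the five series with the highest mean ratio.
            "pickTimeSeriesFilter": {
                "rankingMethod": "METHOD_MEAN",
                "direction": "TOP",
                "numTimeSeries": 5,
            },
        },
        "unitOverride": "1",  # dimensionless ratio
    }

Note that because a cross_series_reducer is set, per_series_aligner and alignment_period are both required and the aligner must not be ALIGN_NONE, per the field descriptions above.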
@@ -3607,6 +4897,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
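The schema above lists every accepted field, but a working widget only needs a handful of them. As a minimal, illustrative sketch (not generated reference material), the snippet below builds a timeSeriesTable widget and creates a dashboard containing it; the project ID, dashboard name, and the compute.googleapis.com/instance/cpu/utilization metric are placeholder assumptions, and Application Default Credentials are assumed to be configured.

    from googleapiclient.discovery import build

    # Discovery-based client for the Cloud Monitoring dashboards API (monitoring v1).
    monitoring = build("monitoring", "v1")

    # One table row per zone: align each series to 10-minute means, then reduce
    # across series within a zone. Per the aggregation notes above, setting
    # crossSeriesReducer requires both perSeriesAligner (not ALIGN_NONE) and
    # alignmentPeriod.
    widget = {
        "title": "Mean CPU utilization by zone",  # placeholder title
        "timeSeriesTable": {
            "dataSets": [
                {
                    "minAlignmentPeriod": "600s",
                    "timeSeriesQuery": {
                        "timeSeriesFilter": {
                            "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                            "aggregation": {
                                "alignmentPeriod": "600s",
                                "perSeriesAligner": "ALIGN_MEAN",
                                "crossSeriesReducer": "REDUCE_MEAN",
                                "groupByFields": ["resource.labels.zone"],
                            },
                        },
                    },
                }
            ]
        },
    }

    dashboard = {
        "displayName": "CPU overview",  # placeholder display name
        "gridLayout": {"columns": "1", "widgets": [widget]},
    }

    monitoring.projects().dashboards().create(
        parent="projects/my-project",  # placeholder project
        body=dashboard,
    ).execute()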
@@ -3858,6 +5234,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
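The timeSeriesFilterRatio fields in this hunk compute a pair-wise division of two filtered-and-aggregated streams. Below is a hedged sketch of an error-rate query that could replace the timeSeriesQuery contents in the dataset sketch above: each side is aligned to a rate and reduced to a single series, so the division yields one ratio stream. The metric type example.googleapis.com/request_count and its status label are hypothetical stand-ins, not names from this API.

    # Hypothetical error-rate ratio: 5xx request rate divided by total request
    # rate. Reducing each side with REDUCE_SUM leaves one series per side, so
    # the pair-wise division produces a single ratio time series.
    ratio_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                "filter": (
                    'metric.type="example.googleapis.com/request_count" '  # hypothetical metric
                    'metric.labels.status="500"'
                ),
                "aggregation": {
                    "alignmentPeriod": "60s",
                    "perSeriesAligner": "ALIGN_RATE",
                    "crossSeriesReducer": "REDUCE_SUM",
                },
            },
            "denominator": {
                "filter": 'metric.type="example.googleapis.com/request_count"',
                "aggregation": {
                    "alignmentPeriod": "60s",
                    "perSeriesAligner": "ALIGN_RATE",
                    "crossSeriesReducer": "REDUCE_SUM",
                },
            },
        },
        "unitOverride": "1",  # ratios are dimensionless
    }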
@@ -4079,6 +5541,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
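The timeSeriesTable widget documented above is plain request-body JSON, so with this client it can be assembled as an ordinary Python dict and passed to the dashboards collection. A minimal sketch follows, assuming a hypothetical project ID, dashboard name, and metric filter; none of those values come from the discovery artifact itself.

    from googleapiclient.discovery import build

    # Build the Cloud Monitoring dashboards client ("monitoring", version "v1").
    service = build("monitoring", "v1")

    # A single-widget dashboard using the timeSeriesTable widget added above.
    # Filter, alignment, and naming choices below are illustrative assumptions.
    dashboard_body = {
        "displayName": "CPU by zone",  # hypothetical dashboard name
        "gridLayout": {
            "widgets": [
                {
                    "title": "Mean CPU utilization",
                    "timeSeriesTable": {
                        "dataSets": [
                            {
                                # Data assumed to arrive every 10 minutes.
                                "minAlignmentPeriod": "600s",
                                "timeSeriesQuery": {
                                    "timeSeriesFilter": {
                                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                        # Reduction requires an aligner and an
                                        # alignment period, per the comments above.
                                        "aggregation": {
                                            "alignmentPeriod": "600s",
                                            "perSeriesAligner": "ALIGN_MEAN",
                                            "crossSeriesReducer": "REDUCE_MEAN",
                                            "groupByFields": ["resource.labels.zone"],
                                        },
                                    },
                                },
                            },
                        ],
                    },
                },
            ],
        },
    }

    # Create the dashboard; "my-project" is a placeholder project ID.
    created = (
        service.projects()
        .dashboards()
        .create(parent="projects/my-project", body=dashboard_body)
        .execute()
    )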
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -4298,6 +5846,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
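The timeSeriesFilterRatio documented above filters and aligns the numerator and denominator independently, divides them pair-wise, and can then apply a secondary aggregation to the resulting series. A hedged sketch of that query shape follows; the metric types and labels are hypothetical, not values from the discovery artifact.

    # Sketch of a ratio-based timeSeriesQuery for a timeSeriesTable data set:
    # errors divided by total requests, then summed per zone.
    ratio_time_series_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                "filter": 'metric.type="example.googleapis.com/request/error_count"',
                "aggregation": {
                    "alignmentPeriod": "300s",
                    "perSeriesAligner": "ALIGN_RATE",
                },
            },
            "denominator": {
                "filter": 'metric.type="example.googleapis.com/request/count"',
                "aggregation": {
                    "alignmentPeriod": "300s",
                    "perSeriesAligner": "ALIGN_RATE",
                },
            },
            # Per the field comment above, this runs after the ratio is computed;
            # a reducer requires an aligner and an alignment period.
            "secondaryAggregation": {
                "alignmentPeriod": "300s",
                "perSeriesAligner": "ALIGN_MEAN",
                "crossSeriesReducer": "REDUCE_SUM",
                "groupByFields": ["resource.labels.zone"],
            },
        },
        # A ratio is dimensionless, so override whatever unit the data carries.
        "unitOverride": "1",
    }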
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -4522,6 +6156,92 @@ Args:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
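For orientation, here is a minimal sketch of how the timeSeriesFilterRatio fields documented above fit together when creating a dashboard through this client library. This is not part of the generated reference: the project ID, metric types, and display names are hypothetical placeholders, and application-default credentials are assumed.

from googleapiclient.discovery import build

# Build the Monitoring client; monitoring v1 carries the
# projects.dashboards resource documented on this page.
service = build("monitoring", "v1")

# A ratio query: the output series is the pair-wise division of the
# aligned numerator and denominator series, as described above.
ratio_query = {
    "timeSeriesFilterRatio": {
        "numerator": {
            "filter": 'metric.type="example.com/request_errors"',  # hypothetical metric
            "aggregation": {
                "alignmentPeriod": "60s",
                "perSeriesAligner": "ALIGN_RATE",
                "crossSeriesReducer": "REDUCE_SUM",
                "groupByFields": ["resource.labels.zone"],
            },
        },
        "denominator": {
            "filter": 'metric.type="example.com/request_count"',  # hypothetical metric
            "aggregation": {
                "alignmentPeriod": "60s",
                "perSeriesAligner": "ALIGN_RATE",
                "crossSeriesReducer": "REDUCE_SUM",
                "groupByFields": ["resource.labels.zone"],
            },
        },
        # secondaryAggregation could be added here to re-aggregate the computed ratio.
    },
    # "timeSeriesQueryLanguage" can carry a query string in place of the filter fields.
    "unitOverride": "1",  # the ratio is dimensionless
}

dashboard = {
    "displayName": "Error ratio (sketch)",
    "gridLayout": {
        "widgets": [
            {
                "title": "Errors / requests",
                "xyChart": {"dataSets": [{"timeSeriesQuery": ratio_query}]},
            }
        ]
    },
}

service.projects().dashboards().create(
    parent="projects/my-project",  # hypothetical project
    body=dashboard,
).execute()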
@@ -4755,6 +6475,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
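A similar sketch for the timeSeriesTable widget this revision adds: each entry in dataSets pairs a timeSeriesQuery with table options such as minAlignmentPeriod and tableTemplate. As before, the metric and project are placeholders, and service is the client built in the previous sketch.

# A table widget listing per-instance CPU utilization, using the
# timeSeriesTable fields documented above.
table_widget = {
    "title": "CPU by instance",
    "timeSeriesTable": {
        "dataSets": [
            {
                "timeSeriesQuery": {
                    "timeSeriesFilter": {
                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                        "aggregation": {
                            "alignmentPeriod": "300s",
                            "perSeriesAligner": "ALIGN_MEAN",
                        },
                        # A sibling pickTimeSeriesFilter could narrow this to,
                        # say, the top 5 series by mean value.
                    },
                },
                "minAlignmentPeriod": "300s",  # data assumed published every 5 minutes
                "tableTemplate": "${resource.labels.instance_id}",  # names each row
            }
        ]
    },
}

body = {
    "displayName": "Instance table (sketch)",
    "gridLayout": {"widgets": [table_widget]},
}
service.projects().dashboards().create(
    parent="projects/my-project",  # hypothetical project
    body=body,
).execute()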
@@ -4976,6 +6782,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
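To make the aggregation rules above concrete, the following is a minimal, hypothetical sketch of a timeSeriesQuery as it could be built with this client library. The project, metric filter, and aligner/reducer choices are illustrative assumptions, not values taken from this reference; the sketch only demonstrates the documented constraint that crossSeriesReducer requires a perSeriesAligner other than ALIGN_NONE plus an alignmentPeriod.

from googleapiclient.discovery import build

# Hypothetical sketch: align each series into 60 s means, then reduce across
# series per zone. The filter and project below are placeholders.
time_series_query = {
    &#x27;timeSeriesFilter&#x27;: {
        &#x27;filter&#x27;: &#x27;metric.type=&quot;compute.googleapis.com/instance/cpu/utilization&quot;&#x27;,
        &#x27;aggregation&#x27;: {
            &#x27;alignmentPeriod&#x27;: &#x27;60s&#x27;,            # required with a non-ALIGN_NONE aligner
            &#x27;perSeriesAligner&#x27;: &#x27;ALIGN_MEAN&#x27;,    # required when crossSeriesReducer is set
            &#x27;crossSeriesReducer&#x27;: &#x27;REDUCE_MEAN&#x27;,
            &#x27;groupByFields&#x27;: [&#x27;resource.labels.zone&#x27;],
        },
    },
}

# The query slots into a dashboard widget; projects().dashboards().create() is
# part of the same monitoring v1 surface documented here.
client = build(&#x27;monitoring&#x27;, &#x27;v1&#x27;)
client.projects().dashboards().create(
    parent=&#x27;projects/my-project&#x27;,  # placeholder project
    body={
        &#x27;displayName&#x27;: &#x27;CPU overview&#x27;,
        &#x27;gridLayout&#x27;: {
            &#x27;widgets&#x27;: [{
                &#x27;title&#x27;: &#x27;CPU by zone&#x27;,
                &#x27;xyChart&#x27;: {&#x27;dataSets&#x27;: [{&#x27;timeSeriesQuery&#x27;: time_series_query}]},
            }],
        },
    },
).execute()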
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -5195,6 +7087,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the first aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
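Because the timeSeriesTable widget is the new addition in this hunk, a short hypothetical sketch may help. It keeps the three Pub/Sub subscriptions with the largest mean backlog, exercising minAlignmentPeriod, tableTemplate, and pickTimeSeriesFilter as documented above; the metric, label names, and thresholds are assumptions for illustration only.

# Hypothetical timeSeriesTable widget; only dataSets with a timeSeriesQuery is
# required, everything else here is optional.
table_widget = {
    &#x27;title&#x27;: &#x27;Largest subscription backlogs&#x27;,
    &#x27;timeSeriesTable&#x27;: {
        &#x27;dataSets&#x27;: [{
            &#x27;minAlignmentPeriod&#x27;: &#x27;600s&#x27;,  # metric assumed to publish every 10 minutes
            &#x27;tableTemplate&#x27;: &#x27;${resource.labels.subscription_id}&#x27;,
            &#x27;timeSeriesQuery&#x27;: {
                &#x27;timeSeriesFilter&#x27;: {
                    &#x27;filter&#x27;: &#x27;metric.type=&quot;pubsub.googleapis.com/subscription/num_undelivered_messages&quot;&#x27;,
                    &#x27;aggregation&#x27;: {
                        &#x27;alignmentPeriod&#x27;: &#x27;600s&#x27;,
                        &#x27;perSeriesAligner&#x27;: &#x27;ALIGN_MAX&#x27;,
                    },
                    # Keep only the three series with the highest mean value.
                    &#x27;pickTimeSeriesFilter&#x27;: {
                        &#x27;rankingMethod&#x27;: &#x27;METHOD_MEAN&#x27;,
                        &#x27;direction&#x27;: &#x27;TOP&#x27;,
                        &#x27;numTimeSeries&#x27;: 3,
                    },
                },
            },
        }],
    },
}

A widget like this would be appended to an existing dashboard&#x27;s layout and written back with projects().dashboards().patch().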
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -5419,6 +7397,92 @@ Returns:
&quot;content&quot;: &quot;A String&quot;, # The text content to be displayed.
&quot;format&quot;: &quot;A String&quot;, # How the text content is formatted.
},
+ &quot;timeSeriesTable&quot;: { # A table that displays time series data. # A widget that displays time series data in a tabular format.
+ &quot;dataSets&quot;: [ # Required. The data displayed in this table.
+ { # Groups a time series query definition with table options.
+ &quot;minAlignmentPeriod&quot;: &quot;A String&quot;, # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.
+ &quot;tableDisplayOptions&quot;: { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered.
+ &quot;shownColumns&quot;: [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.
+ &quot;A String&quot;,
+ ],
+ },
+ &quot;tableTemplate&quot;: &quot;A String&quot;, # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label&#x27;s value, e.g. &quot;${resource.labels.project_id}&quot;.
+ &quot;timeSeriesQuery&quot;: { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API.
+ &quot;timeSeriesFilter&quot;: { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required; otherwise, an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE, and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesFilterRatio&quot;: { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters.
+ &quot;denominator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;numerator&quot;: { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ &quot;aggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;filter&quot;: &quot;A String&quot;, # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ &quot;pickTimeSeriesFilter&quot;: { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter.
+ &quot;direction&quot;: &quot;A String&quot;, # How to use the ranking to select time series that pass through the filter.
+ &quot;numTimeSeries&quot;: 42, # How many time series to allow to pass through the filter.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ &quot;secondaryAggregation&quot;: { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example &quot;the 95% latency across the average of all tasks in a cluster&quot;. This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ &quot;alignmentPeriod&quot;: &quot;A String&quot;, # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks.
+ &quot;crossSeriesReducer&quot;: &quot;A String&quot;, # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ &quot;groupByFields&quot;: [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ &quot;A String&quot;,
+ ],
+ &quot;perSeriesAligner&quot;: &quot;A String&quot;, # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ &quot;statisticalTimeSeriesFilter&quot;: { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ &quot;numTimeSeries&quot;: 42, # How many time series to output.
+ &quot;rankingMethod&quot;: &quot;A String&quot;, # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ &quot;timeSeriesQueryLanguage&quot;: &quot;A String&quot;, # A query used to fetch time series.
+ &quot;unitOverride&quot;: &quot;A String&quot;, # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
&quot;title&quot;: &quot;A String&quot;, # Optional. The title of the widget.
&quot;xyChart&quot;: { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
&quot;chartOptions&quot;: { # Options to control visual rendering of a chart. # Display options for the chart.
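The aggregation and ratio fields above are plain JSON in a request body. As a minimal sketch, the following Python uses this library's discovery client to build a dashboard widget charting an error ratio; the project ID, metric types, aligner/reducer choices, and group-by field are illustrative assumptions, not values taken from this changeset.

# Sketch: a widget body exercising the aggregation and timeSeriesFilterRatio
# fields documented above. Metric types and labels are placeholders.
from googleapiclient import discovery

service = discovery.build("monitoring", "v1")

# Shared aggregation: align each series, then reduce across series per zone.
aggregation = {
    "alignmentPeriod": "60s",           # must be at least 60 seconds
    "perSeriesAligner": "ALIGN_RATE",   # required (not ALIGN_NONE) to reduce
    "crossSeriesReducer": "REDUCE_SUM", # combine the aligned series
    "groupByFields": ["resource.label.zone"],  # one output series per zone
}

widget = {
    "title": "Error ratio by zone",
    "xyChart": {
        "dataSets": [{
            "timeSeriesQuery": {
                "timeSeriesFilterRatio": {
                    "numerator": {
                        "filter": 'metric.type="example.com/errors"',
                        "aggregation": aggregation,
                    },
                    "denominator": {
                        "filter": 'metric.type="example.com/requests"',
                        "aggregation": aggregation,
                    },
                },
                "unitOverride": "1",  # ratios are dimensionless
            },
        }],
    },
}

dashboard = {"displayName": "Example", "gridLayout": {"widgets": [widget]}}
service.projects().dashboards().create(
    parent="projects/my-project", body=dashboard
).execute()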
diff --git a/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html b/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html
index 58913ddcb..d76837aed 100644
--- a/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html
+++ b/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html
@@ -95,7 +95,7 @@ h1, h2, h3 {
Args:
parent: string, Required. The container resource on which to execute the request. Acceptable formats: `projects/[PROJECT_ID|PROJECT_NUMBER]/locations/[LOCATION]/activityTypes/[ACTIVITY_TYPE]` LOCATION here refers to Google Cloud Locations: https://cloud.google.com/about/locations/ (required)
- filter: string, Optional. Filter expression to restrict the activities returned. Supported filters are: - service_account_last_authn.full_resource_name {=} [STRING] - service_account_key_last_authn.full_resource_name {=} [STRING]
+ filter: string, Optional. Filter expression to restrict the activities returned. For serviceAccountLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account. For serviceAccountKeyLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account key.
pageSize: integer, Optional. The maximum number of results to return from this request. Max limit is 1000. Non-positive values are ignored. The presence of `nextPageToken` in the response indicates that more results might be available.
pageToken: string, Optional. If present, then retrieve the next batch of results from the preceding call to this method. `pageToken` must be the value of `nextPageToken` from the previous response. The values of other method parameters should be identical to those in the previous call.
x__xgafv: string, V1 error format.
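For illustration, a call using the corrected filter forms above might look like the following sketch; the project, the service-account resource name, and the query method name are assumptions rather than details taken from this hunk.

# Hypothetical query using the activities.full_resource_name filter form
# documented above. All resource names below are placeholders.
from googleapiclient import discovery

service = discovery.build("policyanalyzer", "v1")
sa = ("//iam.googleapis.com/projects/my-project/serviceAccounts/"
      "my-sa@my-project.iam.gserviceaccount.com")
response = (
    service.projects().locations().activityTypes().activities().query(
        parent=("projects/my-project/locations/global/"
                "activityTypes/serviceAccountLastAuthentication"),
        filter='activities.full_resource_name = "%s"' % sa,
        pageSize=100,
    ).execute()
)
for activity in response.get("activities", []):
    print(activity.get("fullResourceName"))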
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html b/docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html
index dcac944f8..f19aa2b99 100644
--- a/docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html
+++ b/docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html
@@ -206,7 +206,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -236,7 +236,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -338,7 +338,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -368,7 +368,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -494,7 +494,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -524,7 +524,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -652,7 +652,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -682,7 +682,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -833,7 +833,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -863,7 +863,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -941,7 +941,7 @@ Returns:
<pre>Updates a Product.
Args:
- name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;. (required)
+ name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
body: object, The request body.
The object takes the form of:
@@ -982,7 +982,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -1012,7 +1012,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -1115,7 +1115,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -1145,7 +1145,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -1254,7 +1254,7 @@ Returns:
<pre>Updates inventory information for a Product while respecting the last update timestamps of each inventory field. This process is asynchronous and does not require the Product to exist before updating fulfillment information. If the request is valid, the update will be enqueued and processed downstream. As a consequence, when a response is returned, updates are not immediately manifested in the Product queried by GetProduct or ListProducts. When inventory is updated with CreateProduct and UpdateProduct, the specified inventory field value(s) will overwrite any existing value(s) while ignoring the last update time for this field. Furthermore, the last update time for the specified inventory fields will be overwritten to the time of the CreateProduct or UpdateProduct request. If no inventory fields are set in CreateProductRequest.product, then any pre-existing inventory information for this product will be used. If no inventory fields are set in UpdateProductRequest.set_mask, then any existing inventory information will be preserved. Pre-existing inventory information can only be updated with SetInventory, AddFulfillmentPlaces, and RemoveFulfillmentPlaces. This feature is only available for users who have Retail Search enabled. Please submit a form [here](https://cloud.google.com/contact) to contact cloud sales if you are interested in using Retail Search.
Args:
- name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;. (required)
+ name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
body: object, The request body.
The object takes the form of:
@@ -1297,7 +1297,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -1327,7 +1327,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
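(Editor's note: the recurring colorInfo hunks in this commit raise the cap on display colors from 5 to 25 while leaving colorFamilies at 5 values. A sketch of a payload that conforms to the updated limits; the color values are illustrative only:)

```python
# Conforms to the updated limits: at most 5 colorFamilies, at most
# 25 colors, each value a UTF-8 string of at most 128 characters.
color_info = {
    "colorFamilies": ["Blue"],  # standard color group
    "colors": ["navy", "midnight", "cobalt", "denim"],  # frontend aliases
}

assert len(color_info["colorFamilies"]) <= 5
assert len(color_info["colors"]) <= 25
assert all(len(c) <= 128 for c in color_info["colors"])
```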
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html
index 9e144715b..322518102 100644
--- a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html
+++ b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html
@@ -182,7 +182,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -212,7 +212,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -475,7 +475,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -505,7 +505,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html
index 27725cc8b..0d720adbe 100644
--- a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html
+++ b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html
@@ -231,7 +231,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -261,7 +261,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -537,7 +537,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -567,7 +567,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -724,7 +724,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -754,7 +754,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html
index e01015979..485e5e412 100644
--- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html
+++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html
@@ -206,7 +206,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -236,7 +236,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -338,7 +338,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -368,7 +368,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -494,7 +494,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -524,7 +524,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -652,7 +652,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -682,7 +682,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -834,7 +834,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -864,7 +864,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -943,7 +943,7 @@ Returns:
<pre>Updates a Product.
Args:
- name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;. (required)
+ name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
body: object, The request body.
The object takes the form of:
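(Editor's note: by contrast with setInventory, a plain metadata update goes through patch, the UpdateProduct method documented here. A minimal sketch under the same assumptions as above, reusing the hypothetical `service` and `name`; updateMask restricts which fields are written:)

```python
body = {
    "name": name,
    "title": "Classic crewneck tee",  # illustrative value
}

# Only the fields listed in updateMask are overwritten; inventory
# fields updated this way ignore per-field last-update timestamps,
# per the setInventory description elsewhere in this commit.
request = (service.projects().locations().catalogs().branches()
           .products().patch(name=name, body=body, updateMask="title"))
product = request.execute()  # Returns the updated Product.
```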
@@ -984,7 +984,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -1014,7 +1014,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -1117,7 +1117,7 @@ Returns:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -1147,7 +1147,7 @@ Returns:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
@@ -1256,7 +1256,7 @@ Returns:
<pre>Updates inventory information for a Product while respecting the last update timestamps of each inventory field. This process is asynchronous and does not require the Product to exist before updating fulfillment information. If the request is valid, the update will be enqueued and processed downstream. As a consequence, when a response is returned, updates are not immediately manifested in the Product queried by GetProduct or ListProducts. When inventory is updated with CreateProduct and UpdateProduct, the specified inventory field value(s) will overwrite any existing value(s) while ignoring the last update time for this field. Furthermore, the last update time for the specified inventory fields will be overwritten to the time of the CreateProduct or UpdateProduct request. If no inventory fields are set in CreateProductRequest.product, then any pre-existing inventory information for this product will be used. If no inventory fields are set in UpdateProductRequest.set_mask, then any existing inventory information will be preserved. Pre-existing inventory information can only be updated with SetInventory, AddFulfillmentPlaces, and RemoveFulfillmentPlaces. This feature is only available for users who have Retail Search enabled. Please submit a form [here](https://cloud.google.com/contact) to contact cloud sales if you are interested in using Retail Search.
Args:
- name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;. (required)
+ name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
body: object, The request body.
The object takes the form of:
@@ -1299,7 +1299,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -1329,7 +1329,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html
index e28eb9cac..46db30ae4 100644
--- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html
+++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html
@@ -182,7 +182,7 @@ Args:
&quot;colorFamilies&quot;: [ # The standard color families. Strongly recommended to use the following standard color groups: &quot;Red&quot;, &quot;Pink&quot;, &quot;Orange&quot;, &quot;Yellow&quot;, &quot;Purple&quot;, &quot;Green&quot;, &quot;Cyan&quot;, &quot;Blue&quot;, &quot;Brown&quot;, &quot;White&quot;, &quot;Gray&quot;, &quot;Black&quot; and &quot;Mixed&quot;. Normally it is expected to have only 1 color family. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
- &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ &quot;colors&quot;: [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single &quot;Mixed&quot; instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
&quot;A String&quot;,
],
},
@@ -212,7 +212,7 @@ Args:
&quot;materials&quot;: [ # The material of the product. For example, &quot;leather&quot;, &quot;wooden&quot;. A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -476,7 +476,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -506,7 +506,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
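
[Editor's note, not part of the generated diff: the hunks above document Product attribute limits in the Retail API, including the colors cap raised from 5 to 25. A minimal Python sketch of a body that stays within those limits, with PROJECT_ID and PRODUCT_ID as placeholder values, might look like:]

    # Illustrative only: a Product body within the documented limits
    # (up to 25 colors after this change, 5 color families, 20 materials,
    # 20 patterns; each value at most 128 UTF-8 characters).
    product_body = {
        "name": (
            "projects/PROJECT_ID/locations/global/catalogs/default_catalog"
            "/branches/default_branch/products/PRODUCT_ID"  # placeholder IDs
        ),
        "colorInfo": {
            "colorFamilies": ["Blue"],         # normally a single standard family
            "colors": ["Navy", "Steel Blue"],  # display names; cap is now 25
        },
        "materials": ["leather"],
        "patterns": ["striped"],
    }
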
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html
index 4bf37d204..6307920f2 100644
--- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html
+++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html
@@ -231,7 +231,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -261,7 +261,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -537,7 +537,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -567,7 +567,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -724,7 +724,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -754,7 +754,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html
index e96135793..21286a24c 100644
--- a/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html
+++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html
@@ -206,7 +206,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -236,7 +236,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -338,7 +338,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -368,7 +368,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -494,7 +494,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -524,7 +524,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -652,7 +652,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -682,7 +682,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -833,7 +833,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -863,7 +863,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -941,7 +941,7 @@ Returns:
<pre>Updates a Product.
Args:
- name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+ name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
body: object, The request body.
The object takes the form of:
@@ -982,7 +982,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -1012,7 +1012,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -1115,7 +1115,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -1145,7 +1145,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
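
[Editor's note, not part of the generated diff: a hedged sketch of how the patch method documented above might be invoked with this library's dynamic client. PROJECT_ID, PRODUCT_ID, and the updateMask value are illustrative placeholders, not taken from the diff.]

    from googleapiclient.discovery import build

    # Build the Retail v2beta client from its discovery document
    # (uses Application Default Credentials).
    service = build("retail", "v2beta")

    name = (
        "projects/PROJECT_ID/locations/global/catalogs/default_catalog"
        "/branches/default_branch/products/PRODUCT_ID"  # placeholder IDs
    )

    # Update only the color display names; updateMask restricts the write.
    request = (
        service.projects().locations().catalogs().branches().products().patch(
            name=name,
            updateMask="colorInfo.colors",
            body={"name": name, "colorInfo": {"colors": ["Navy"]}},
        )
    )
    product = request.execute()
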
@@ -1254,7 +1254,7 @@ Returns:
<pre>Updates inventory information for a Product while respecting the last update timestamps of each inventory field. This process is asynchronous and does not require the Product to exist before updating fulfillment information. If the request is valid, the update will be enqueued and processed downstream. As a consequence, when a response is returned, updates are not immediately manifested in the Product queried by GetProduct or ListProducts. When inventory is updated with CreateProduct and UpdateProduct, the specified inventory field value(s) will overwrite any existing value(s) while ignoring the last update time for this field. Furthermore, the last update time for the specified inventory fields will be overwritten to the time of the CreateProduct or UpdateProduct request. If no inventory fields are set in CreateProductRequest.product, then any pre-existing inventory information for this product will be used. If no inventory fields are set in UpdateProductRequest.set_mask, then any existing inventory information will be preserved. Pre-existing inventory information can only be updated with SetInventory, AddFulfillmentPlaces, and RemoveFulfillmentPlaces. This feature is only available for users who have Retail Search enabled. Please submit a form [here](https://cloud.google.com/contact) to contact cloud sales if you are interested in using Retail Search.
Args:
- name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+ name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
body: object, The request body.
The object takes the form of:
@@ -1297,7 +1297,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -1327,7 +1327,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
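
[Editor's note, not part of the generated diff: a similar hedged sketch for the asynchronous setInventory call described above, reusing service and name from the previous sketch. The setMask field choices are illustrative; because the update is enqueued and applied downstream, the call returns a long-running operation rather than the updated Product.]

    # Enqueue an inventory update; only the fields named in setMask are written.
    request = (
        service.projects().locations().catalogs().branches().products().setInventory(
            name=name,
            body={
                "inventory": {
                    "name": name,
                    "priceInfo": {"price": 19.99, "currencyCode": "USD"},
                    "availability": "IN_STOCK",
                },
                "setMask": "priceInfo,availability",
                "allowMissing": True,  # the Product need not exist yet
            },
        )
    )
    operation = request.execute()  # long-running Operation resource
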
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html
index 63ba49512..ebccfc2e4 100644
--- a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html
+++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html
@@ -182,7 +182,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -212,7 +212,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -475,7 +475,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -505,7 +505,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html
index e7a117b7c..512939a1a 100644
--- a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html
+++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html
@@ -231,7 +231,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -261,7 +261,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -537,7 +537,7 @@ Args:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -567,7 +567,7 @@ Args:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
"A String",
],
- "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+ "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
"patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
"A String",
],
@@ -724,7 +724,7 @@ Returns:
"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
- "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+ "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
"A String",
],
},
@@ -754,7 +754,7 @@ Returns:
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
&quot;A String&quot;,
],
- &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be &quot;default_branch&quot;.
+ &quot;name&quot;: &quot;A String&quot;, # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
&quot;patterns&quot;: [ # The pattern or graphic print of the product. For example, &quot;striped&quot;, &quot;polka dot&quot;, &quot;paisley&quot;. A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
&quot;A String&quot;,
],
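A minimal sketch of how the Product fields above surface through the discovery-based Python client; the project, catalog, product values, and the `colorInfo` nesting are placeholders/assumptions, and Application Default Credentials are assumed at execute time:

    from googleapiclient.discovery import build

    retail = build("retail", "v2")

    # Placeholder product; the colors list may now carry up to 25 display names.
    product = {
        "title": "Striped oxford shirt",
        "colorInfo": {
            "colorFamilies": ["Blue"],                  # still capped at 5 values
            "colors": ["navy", "royal blue", "denim"],  # capped at 25 values
        },
    }
    parent = ("projects/my-project/locations/global/catalogs/"
              "default_catalog/branches/default_branch")
    created = retail.projects().locations().catalogs().branches().products().create(
        parent=parent, productId="shirt-001", body=product
    ).execute()
    print(created["name"])  # full resource name, as documented above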
diff --git a/docs/dyn/servicenetworking_v1.services.html b/docs/dyn/servicenetworking_v1.services.html
index b8d9933e3..f31364030 100644
--- a/docs/dyn/servicenetworking_v1.services.html
+++ b/docs/dyn/servicenetworking_v1.services.html
@@ -132,6 +132,7 @@ Args:
&quot;consumerNetwork&quot;: &quot;A String&quot;, # Required. The name of the service consumer&#x27;s VPC network. The network must have an existing private connection that was provisioned through the connections.create method. The name must be in the following format: `projects/{project}/global/networks/{network}`, where {project} is a project number, such as `12345`. {network} is the name of a VPC network in the project.
&quot;description&quot;: &quot;A String&quot;, # Optional. Description of the subnet.
&quot;ipPrefixLength&quot;: 42, # Required. The prefix length of the subnet&#x27;s IP address range. Use CIDR range notation, such as `30` to provision a subnet with an `x.x.x.x/30` CIDR range. The IP address range is drawn from a pool of available ranges in the service consumer&#x27;s allocated range.
+ &quot;outsideAllocationPublicIpRange&quot;: &quot;A String&quot;, # Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this primary IP range.
&quot;privateIpv6GoogleAccess&quot;: &quot;A String&quot;, # Optional. The private IPv6 google access type for the VMs in this subnet. For information about the access types that can be set using this field, see [subnetwork](https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks) in the Compute API documentation.
      &quot;region&quot;: &quot;A String&quot;, # Required. The name of a [region](/compute/docs/regions-zones) for the subnet, such as `europe-west1`.
&quot;requestedAddress&quot;: &quot;A String&quot;, # Optional. The starting address of a range. The address must be a valid IPv4 address in the x.x.x.x format. This value combined with the IP prefix range is the CIDR range for the subnet. The range must be within the allocated range that is assigned to the private connection. If the CIDR range isn&#x27;t available, the call fails.
@@ -141,6 +142,7 @@ Args:
&quot;secondaryIpRangeSpecs&quot;: [ # Optional. A list of secondary IP ranges to be created within the new subnetwork.
{
&quot;ipPrefixLength&quot;: 42, # Required. The prefix length of the secondary IP range. Use CIDR range notation, such as `30` to provision a secondary IP range with an `x.x.x.x/30` CIDR range. The IP address range is drawn from a pool of available ranges in the service consumer&#x27;s allocated range.
+ &quot;outsideAllocationPublicIpRange&quot;: &quot;A String&quot;, # Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this secondary IP range.
&quot;rangeName&quot;: &quot;A String&quot;, # Required. A name for the secondary IP range. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.
&quot;requestedAddress&quot;: &quot;A String&quot;, # Optional. The starting address of a range. The address must be a valid IPv4 address in the x.x.x.x format. This value combined with the IP prefix range is the CIDR range for the secondary IP range. The range must be within the allocated range that is assigned to the private connection. If the CIDR range isn&#x27;t available, the call fails.
},
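The new `outsideAllocationPublicIpRange` field rides on the subnet-request body shown above. A hedged sketch against `services.addSubnetwork`; the parent format, subnet name, and IP ranges below are placeholders:

    from googleapiclient.discovery import build

    sn = build("servicenetworking", "v1")
    body = {
        "subnetwork": "my-subnet",
        "consumerNetwork": "projects/12345/global/networks/my-vpc",
        "ipPrefixLength": 30,
        "region": "europe-west1",
        # New: provision from a caller-supplied public range instead of the
        # customer-reserved allocation.
        "outsideAllocationPublicIpRange": "203.0.113.0/24",
        "secondaryIpRangeSpecs": [{
            "rangeName": "pods",
            "ipPrefixLength": 24,
            "outsideAllocationPublicIpRange": "198.51.100.0/24",
        }],
    }
    # Parent shape is a placeholder here; see the method docs for the exact form.
    op = sn.services().addSubnetwork(
        parent="services/example.googleapis.com/projects/123/global/networks/host",
        body=body,
    ).execute()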
diff --git a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json
index 0e7d59ff1..2d23da134 100644
--- a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json
+++ b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json
@@ -115,7 +115,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://acceleratedmobilepageurl.googleapis.com/",
"schemas": {
"AmpUrl": {
diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json
index 26ad192fb..125e66bb9 100644
--- a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json
+++ b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json
@@ -2568,7 +2568,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://adexchangebuyer.googleapis.com/",
"schemas": {
"AbsoluteDateRange": {
@@ -2835,7 +2835,7 @@
"type": "string"
},
"clientName": {
- "description": "Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.",
+ "description": "Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed.",
"type": "string"
},
"entityId": {
diff --git a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json
index e8964f666..8931593f4 100644
--- a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json
+++ b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json
@@ -272,7 +272,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210920",
"rootUrl": "https://admin.googleapis.com/",
"schemas": {
"Application": {
diff --git a/googleapiclient/discovery_cache/documents/admin.directory_v1.json b/googleapiclient/discovery_cache/documents/admin.directory_v1.json
index 46bedba76..87b93955c 100644
--- a/googleapiclient/discovery_cache/documents/admin.directory_v1.json
+++ b/googleapiclient/discovery_cache/documents/admin.directory_v1.json
@@ -4397,7 +4397,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210920",
"rootUrl": "https://admin.googleapis.com/",
"schemas": {
"Alias": {
diff --git a/googleapiclient/discovery_cache/documents/admin.reports_v1.json b/googleapiclient/discovery_cache/documents/admin.reports_v1.json
index 0da8099d7..02d744b23 100644
--- a/googleapiclient/discovery_cache/documents/admin.reports_v1.json
+++ b/googleapiclient/discovery_cache/documents/admin.reports_v1.json
@@ -631,7 +631,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210920",
"rootUrl": "https://admin.googleapis.com/",
"schemas": {
"Activities": {
diff --git a/googleapiclient/discovery_cache/documents/admob.v1.json b/googleapiclient/discovery_cache/documents/admob.v1.json
index a5cca3ea2..81d7a6680 100644
--- a/googleapiclient/discovery_cache/documents/admob.v1.json
+++ b/googleapiclient/discovery_cache/documents/admob.v1.json
@@ -321,7 +321,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://admob.googleapis.com/",
"schemas": {
"AdUnit": {
diff --git a/googleapiclient/discovery_cache/documents/admob.v1beta.json b/googleapiclient/discovery_cache/documents/admob.v1beta.json
index 6ee3b30ac..d81c570e7 100644
--- a/googleapiclient/discovery_cache/documents/admob.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/admob.v1beta.json
@@ -321,7 +321,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://admob.googleapis.com/",
"schemas": {
"AdUnit": {
diff --git a/googleapiclient/discovery_cache/documents/adsense.v2.json b/googleapiclient/discovery_cache/documents/adsense.v2.json
index cc97cceba..abe64e70a 100644
--- a/googleapiclient/discovery_cache/documents/adsense.v2.json
+++ b/googleapiclient/discovery_cache/documents/adsense.v2.json
@@ -1567,7 +1567,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://adsense.googleapis.com/",
"schemas": {
"Account": {
diff --git a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json
index 332830c1b..1e2fdbda0 100644
--- a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json
@@ -423,7 +423,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210921",
"rootUrl": "https://alertcenter.googleapis.com/",
"schemas": {
"AccountSuspensionDetails": {
diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
index 8e9ede4df..58509fc85 100644
--- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
@@ -3152,7 +3152,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://analyticsadmin.googleapis.com/",
"schemas": {
"GoogleAnalyticsAdminV1alphaAccount": {
@@ -4464,6 +4464,10 @@
"description": "A resource message representing a Google Analytics GA4 property.",
"id": "GoogleAnalyticsAdminV1alphaProperty",
"properties": {
+ "account": {
+ "description": "Immutable. The resource name of the parent account Format: accounts/{account_id} Example: \"accounts/123\"",
+ "type": "string"
+ },
"createTime": {
"description": "Output only. Time when the entity was originally created.",
"format": "google-datetime",
diff --git a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json
index f52414d25..81a8553ed 100644
--- a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json
@@ -313,7 +313,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210924",
"rootUrl": "https://analyticsdata.googleapis.com/",
"schemas": {
"BatchRunPivotReportsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
index 69a9aa8d4..8a348c04d 100644
--- a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
+++ b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
@@ -825,7 +825,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210924",
"rootUrl": "https://androiddeviceprovisioning.googleapis.com/",
"schemas": {
"ClaimDeviceRequest": {
diff --git a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json
index c1a7f034f..131ef9fba 100644
--- a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json
+++ b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json
@@ -2610,7 +2610,7 @@
}
}
},
- "revision": "20210916",
+ "revision": "20210923",
"rootUrl": "https://androidenterprise.googleapis.com/",
"schemas": {
"Administrator": {
diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json
index 7ab3174f2..60b7573c0 100644
--- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json
+++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json
@@ -2681,7 +2681,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://androidpublisher.googleapis.com/",
"schemas": {
"Apk": {
diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1.json b/googleapiclient/discovery_cache/documents/apigateway.v1.json
index 184bdc32d..dc2bc7fa3 100644
--- a/googleapiclient/discovery_cache/documents/apigateway.v1.json
+++ b/googleapiclient/discovery_cache/documents/apigateway.v1.json
@@ -1083,7 +1083,7 @@
}
}
},
- "revision": "20210908",
+ "revision": "20210915",
"rootUrl": "https://apigateway.googleapis.com/",
"schemas": {
"ApigatewayApi": {
diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json
index e4f65c429..90166bee1 100644
--- a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json
@@ -1083,7 +1083,7 @@
}
}
},
- "revision": "20210908",
+ "revision": "20210915",
"rootUrl": "https://apigateway.googleapis.com/",
"schemas": {
"ApigatewayApi": {
diff --git a/googleapiclient/discovery_cache/documents/apigee.v1.json b/googleapiclient/discovery_cache/documents/apigee.v1.json
index 7cda74ffc..3821ddc6a 100644
--- a/googleapiclient/discovery_cache/documents/apigee.v1.json
+++ b/googleapiclient/discovery_cache/documents/apigee.v1.json
@@ -10978,6 +10978,7 @@
"type": "string"
},
"displayName": {
+ "description": "Display name for the Apigee organization. Unused, but reserved for future use.",
"type": "string"
},
"environments": {
@@ -11443,7 +11444,7 @@
"type": "string"
},
"paymentFundingModel": {
- "description": "Flag that specifies the billing account type, prepaid or postpaid.",
+ "description": "DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.",
"enum": [
"PAYMENT_FUNDING_MODEL_UNSPECIFIED",
"PREPAID",
diff --git a/googleapiclient/discovery_cache/documents/apikeys.v2.json b/googleapiclient/discovery_cache/documents/apikeys.v2.json
index 8bbb17e5c..54d794af2 100644
--- a/googleapiclient/discovery_cache/documents/apikeys.v2.json
+++ b/googleapiclient/discovery_cache/documents/apikeys.v2.json
@@ -424,7 +424,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://apikeys.googleapis.com/",
"schemas": {
"Operation": {
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json
index 950f12fb8..d713be253 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1.json
@@ -1595,7 +1595,7 @@
}
}
},
- "revision": "20210911",
+ "revision": "20210918",
"rootUrl": "https://appengine.googleapis.com/",
"schemas": {
"ApiConfigHandler": {
@@ -3305,6 +3305,13 @@
"description": "Relative name of the service within the application. Example: default.@OutputOnly",
"type": "string"
},
+ "labels": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., \"env=prod\", \"env=qa\"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.",
+ "type": "object"
+ },
"name": {
"description": "Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly",
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
index 11249a9f5..4d071d1f0 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
@@ -709,7 +709,7 @@
}
}
},
- "revision": "20210911",
+ "revision": "20210918",
"rootUrl": "https://appengine.googleapis.com/",
"schemas": {
"AuthorizedCertificate": {
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json
index 33ad19653..576ef4c59 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json
@@ -1595,7 +1595,7 @@
}
}
},
- "revision": "20210911",
+ "revision": "20210918",
"rootUrl": "https://appengine.googleapis.com/",
"schemas": {
"ApiConfigHandler": {
@@ -3368,6 +3368,13 @@
"description": "Relative name of the service within the application. Example: default.@OutputOnly",
"type": "string"
},
+ "labels": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., \"env=prod\", \"env=qa\"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.",
+ "type": "object"
+ },
"name": {
"description": "Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly",
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json
index 7f6ff319f..955b05a16 100644
--- a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json
@@ -586,7 +586,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://area120tables.googleapis.com/",
"schemas": {
"BatchCreateRowsRequest": {
@@ -690,6 +690,10 @@
"description": "column name",
"type": "string"
},
+ "readonly": {
+ "description": "Optional. Indicates that values for the column cannot be set by the user.",
+ "type": "boolean"
+ },
"relationshipDetails": {
"$ref": "RelationshipDetails",
"description": "Optional. Additional details about a relationship column. Specified when data_type is relationship."
diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json
index c1f3e03b5..a9af0fddd 100644
--- a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json
+++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json
@@ -352,7 +352,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://artifactregistry.googleapis.com/",
"schemas": {
"AptArtifact": {
@@ -673,7 +673,6 @@
"DOCKER",
"MAVEN",
"NPM",
- "PYPI",
"APT",
"YUM",
"PYTHON"
@@ -683,7 +682,6 @@
"Docker package format.",
"Maven package format.",
"NPM package format.",
- "PyPI package format.",
"APT package format.",
"YUM package format.",
"Python package format."
diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json
index 3d77e0c82..a16380419 100644
--- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json
@@ -929,7 +929,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://artifactregistry.googleapis.com/",
"schemas": {
"AptArtifact": {
@@ -1456,7 +1456,7 @@
"Docker package format.",
"Maven package format.",
"NPM package format.",
- "PyPI package format.",
+ "PyPI package format. Deprecated, use PYTHON instead.",
"APT package format.",
"YUM package format.",
"Python package format."
diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json
index b776df5d4..ddb90bc14 100644
--- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json
@@ -1081,7 +1081,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://artifactregistry.googleapis.com/",
"schemas": {
"AptArtifact": {
@@ -1630,7 +1630,7 @@
"Docker package format.",
"Maven package format.",
"NPM package format.",
- "PyPI package format.",
+ "PyPI package format. Deprecated, use PYTHON instead.",
"APT package format.",
"YUM package format.",
"Python package format."
diff --git a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json
index 6227efd74..064c575b7 100644
--- a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json
+++ b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json
@@ -351,7 +351,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210916",
"rootUrl": "https://assuredworkloads.googleapis.com/",
"schemas": {
"GoogleCloudAssuredworkloadsV1CreateWorkloadOperationMetadata": {
@@ -425,7 +425,7 @@
"id": "GoogleCloudAssuredworkloadsV1Workload",
"properties": {
"billingAccount": {
- "description": "Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.",
+ "description": "Optional. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.",
"type": "string"
},
"complianceRegime": {
diff --git a/googleapiclient/discovery_cache/documents/bigquery.v2.json b/googleapiclient/discovery_cache/documents/bigquery.v2.json
index 021c5ec5f..d5838923c 100644
--- a/googleapiclient/discovery_cache/documents/bigquery.v2.json
+++ b/googleapiclient/discovery_cache/documents/bigquery.v2.json
@@ -338,7 +338,7 @@
]
},
"delete": {
- "description": "Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview.",
+ "description": "Requests the deletion of the metadata of a job. This call returns when the job's metadata is deleted.",
"flatPath": "projects/{projectsId}/jobs/{jobsId}/delete",
"httpMethod": "DELETE",
"id": "bigquery.jobs.delete",
@@ -348,7 +348,7 @@
],
"parameters": {
"jobId": {
- "description": "Required. Job ID of the job to be deleted. If this is a parent job which has child jobs, all child jobs will be deleted as well. Deletion of child jobs directly is not allowed.",
+ "description": "Required. Job ID of the job for which metadata is to be deleted. If this is a parent job which has child jobs, the metadata from all child jobs will be deleted as well. Direct deletion of the metadata of child jobs is not allowed.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
@@ -360,7 +360,7 @@
"type": "string"
},
"projectId": {
- "description": "Required. Project ID of the job to be deleted.",
+ "description": "Required. Project ID of the job for which metadata is to be deleted.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
@@ -1683,7 +1683,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210919",
"rootUrl": "https://bigquery.googleapis.com/",
"schemas": {
"AggregateClassificationMetrics": {
@@ -1868,7 +1868,7 @@
"YEARLY"
],
"enumDescriptions": [
- "",
+ "Unspecified seasonal period.",
"No seasonality",
"Daily period, 24 hours.",
"Weekly period, 7 days.",
@@ -1935,7 +1935,7 @@
"YEARLY"
],
"enumDescriptions": [
- "",
+ "Unspecified seasonal period.",
"No seasonality",
"Daily period, 24 hours.",
"Weekly period, 7 days.",
@@ -2007,7 +2007,7 @@
"YEARLY"
],
"enumDescriptions": [
- "",
+ "Unspecified seasonal period.",
"No seasonality",
"Daily period, 24 hours.",
"Weekly period, 7 days.",
@@ -2063,7 +2063,7 @@
"YEARLY"
],
"enumDescriptions": [
- "",
+ "Unspecified seasonal period.",
"No seasonality",
"Daily period, 24 hours.",
"Weekly period, 7 days.",
@@ -6087,10 +6087,53 @@
"format": "int64",
"type": "string"
},
+ "boosterType": {
+ "description": "Booster type for boosted tree models.",
+ "enum": [
+ "BOOSTER_TYPE_UNSPECIFIED",
+ "GBTREE",
+ "DART"
+ ],
+ "enumDescriptions": [
+ "Unspecified booster type.",
+ "Gbtree booster.",
+ "Dart booster."
+ ],
+ "type": "string"
+ },
"cleanSpikesAndDips": {
"description": "If true, clean spikes and dips in the input time series.",
"type": "boolean"
},
+ "colsampleBylevel": {
+ "description": "Subsample ratio of columns for each level for boosted tree models.",
+ "format": "double",
+ "type": "number"
+ },
+ "colsampleBynode": {
+ "description": "Subsample ratio of columns for each node(split) for boosted tree models.",
+ "format": "double",
+ "type": "number"
+ },
+ "colsampleBytree": {
+ "description": "Subsample ratio of columns when constructing each tree for boosted tree models.",
+ "format": "double",
+ "type": "number"
+ },
+ "dartNormalizeType": {
+ "description": "Type of normalization algorithm for boosted tree models using dart booster.",
+ "enum": [
+ "DART_NORMALIZE_TYPE_UNSPECIFIED",
+ "TREE",
+ "FOREST"
+ ],
+ "enumDescriptions": [
+ "Unspecified dart normalize type.",
+ "New trees have the same weight of each of dropped trees.",
+ "New trees have the same weight of sum of dropped trees."
+ ],
+ "type": "string"
+ },
"dataFrequency": {
"description": "The data frequency of a time series.",
"enum": [
@@ -6457,6 +6500,11 @@
"format": "double",
"type": "number"
},
+ "minTreeChildWeight": {
+ "description": "Minimum sum of instance weight needed in a child for boosted tree models.",
+ "format": "int64",
+ "type": "string"
+ },
"modelUri": {
"description": "Google Cloud Storage URI from which the model was imported. Only applicable for imported models.",
"type": "string"
@@ -6475,6 +6523,11 @@
"format": "int64",
"type": "string"
},
+ "numParallelTree": {
+ "description": "Number of parallel trees constructed during each iteration for boosted tree models.",
+ "format": "int64",
+ "type": "string"
+ },
"optimizationStrategy": {
"description": "Optimization strategy for training linear regression models.",
"enum": [
@@ -6517,6 +6570,24 @@
"description": "Column to be designated as time series timestamp for ARIMA model.",
"type": "string"
},
+ "treeMethod": {
+ "description": "Tree construction algorithm for boosted tree models.",
+ "enum": [
+ "TREE_METHOD_UNSPECIFIED",
+ "AUTO",
+ "EXACT",
+ "APPROX",
+ "HIST"
+ ],
+ "enumDescriptions": [
+ "Unspecified tree method.",
+ "Use heuristic to choose the fastest method.",
+ "Exact greedy algorithm.",
+ "Approximate greedy algorithm using quantile sketch and gradient histogram.",
+ "Fast histogram optimized approximate greedy algorithm."
+ ],
+ "type": "string"
+ },
"userColumn": {
"description": "User column specified for matrix factorization models.",
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json
index a4f29491c..d541abfed 100644
--- a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json
@@ -395,7 +395,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210918",
"rootUrl": "https://bigqueryconnection.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json
index fb099b2fd..c8e1cefb2 100644
--- a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json
+++ b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json
@@ -1340,7 +1340,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://bigquerydatatransfer.googleapis.com/",
"schemas": {
"CheckValidCredsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
index 6d1b8c63f..20a8cfd10 100644
--- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
+++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
@@ -788,7 +788,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210916",
"rootUrl": "https://bigqueryreservation.googleapis.com/",
"schemas": {
"Assignment": {
diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json
index dadae7f09..07fd0284f 100644
--- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json
@@ -786,7 +786,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210916",
"rootUrl": "https://bigqueryreservation.googleapis.com/",
"schemas": {
"Assignment": {
diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json
index f0d6ca5eb..bac88dabe 100644
--- a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json
+++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json
@@ -270,7 +270,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://billingbudgets.googleapis.com/",
"schemas": {
"GoogleCloudBillingBudgetsV1Budget": {
diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json
index b890e239c..4b3b85bec 100644
--- a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json
@@ -264,7 +264,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://billingbudgets.googleapis.com/",
"schemas": {
"GoogleCloudBillingBudgetsV1beta1AllUpdatesRule": {
diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json
index dea596abc..d79530be9 100644
--- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json
+++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json
@@ -551,7 +551,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://binaryauthorization.googleapis.com/",
"schemas": {
"AdmissionRule": {
diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json
index 22a20db12..e870bc49a 100644
--- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json
@@ -551,7 +551,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://binaryauthorization.googleapis.com/",
"schemas": {
"AdmissionRule": {
diff --git a/googleapiclient/discovery_cache/documents/blogger.v2.json b/googleapiclient/discovery_cache/documents/blogger.v2.json
index 029165400..12ae33260 100644
--- a/googleapiclient/discovery_cache/documents/blogger.v2.json
+++ b/googleapiclient/discovery_cache/documents/blogger.v2.json
@@ -401,7 +401,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://blogger.googleapis.com/",
"schemas": {
"Blog": {
diff --git a/googleapiclient/discovery_cache/documents/blogger.v3.json b/googleapiclient/discovery_cache/documents/blogger.v3.json
index 931def836..4d61c0773 100644
--- a/googleapiclient/discovery_cache/documents/blogger.v3.json
+++ b/googleapiclient/discovery_cache/documents/blogger.v3.json
@@ -1678,7 +1678,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://blogger.googleapis.com/",
"schemas": {
"Blog": {
diff --git a/googleapiclient/discovery_cache/documents/books.v1.json b/googleapiclient/discovery_cache/documents/books.v1.json
index 5d111a28e..b2ac2ef14 100644
--- a/googleapiclient/discovery_cache/documents/books.v1.json
+++ b/googleapiclient/discovery_cache/documents/books.v1.json
@@ -2671,7 +2671,7 @@
}
}
},
- "revision": "20210912",
+ "revision": "20210920",
"rootUrl": "https://books.googleapis.com/",
"schemas": {
"Annotation": {
diff --git a/googleapiclient/discovery_cache/documents/calendar.v3.json b/googleapiclient/discovery_cache/documents/calendar.v3.json
index 7b61eb19b..d38eda0f2 100644
--- a/googleapiclient/discovery_cache/documents/calendar.v3.json
+++ b/googleapiclient/discovery_cache/documents/calendar.v3.json
@@ -1723,7 +1723,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210924",
"rootUrl": "https://www.googleapis.com/",
"schemas": {
"Acl": {
diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json
index e49c7dfb5..87d07ad4c 100644
--- a/googleapiclient/discovery_cache/documents/chat.v1.json
+++ b/googleapiclient/discovery_cache/documents/chat.v1.json
@@ -601,7 +601,7 @@
}
}
},
- "revision": "20210911",
+ "revision": "20210918",
"rootUrl": "https://chat.googleapis.com/",
"schemas": {
"ActionParameter": {
diff --git a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json
index 664a354e7..3599a5465 100644
--- a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json
@@ -382,7 +382,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://chromemanagement.googleapis.com/",
"schemas": {
"GoogleChromeManagementV1AndroidAppInfo": {
diff --git a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json
index e573bab0c..15bb7ac6b 100644
--- a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json
+++ b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json
@@ -324,7 +324,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210924",
"rootUrl": "https://chromepolicy.googleapis.com/",
"schemas": {
"GoogleChromePolicyV1AdditionalTargetKeyName": {
diff --git a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json
index f42460af8..c10244f1e 100644
--- a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json
+++ b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json
@@ -116,7 +116,7 @@
}
}
},
- "revision": "20210916",
+ "revision": "20210923",
"rootUrl": "https://chromeuxreport.googleapis.com/",
"schemas": {
"Bin": {
diff --git a/googleapiclient/discovery_cache/documents/classroom.v1.json b/googleapiclient/discovery_cache/documents/classroom.v1.json
index 835de9bc5..9b177555c 100644
--- a/googleapiclient/discovery_cache/documents/classroom.v1.json
+++ b/googleapiclient/discovery_cache/documents/classroom.v1.json
@@ -2400,7 +2400,7 @@
}
}
},
- "revision": "20210920",
+ "revision": "20210922",
"rootUrl": "https://classroom.googleapis.com/",
"schemas": {
"Announcement": {
diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1.json
index 2f150529e..8e5e53c9c 100644
--- a/googleapiclient/discovery_cache/documents/cloudasset.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudasset.v1.json
@@ -727,7 +727,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://cloudasset.googleapis.com/",
"schemas": {
"AccessSelector": {
diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json
index 8924250f3..34ce2ee10 100644
--- a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json
@@ -411,7 +411,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://cloudasset.googleapis.com/",
"schemas": {
"AnalyzeIamPolicyLongrunningMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json
index 6a929294a..ddceaed87 100644
--- a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json
@@ -207,7 +207,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://cloudasset.googleapis.com/",
"schemas": {
"AnalyzeIamPolicyLongrunningMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json
index 6b638efcd..60aa75350 100644
--- a/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json
@@ -221,7 +221,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://cloudasset.googleapis.com/",
"schemas": {
"AccessSelector": {
diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json
index de63cfb57..aa18213fc 100644
--- a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json
@@ -177,7 +177,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://cloudasset.googleapis.com/",
"schemas": {
"AnalyzeIamPolicyLongrunningMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json
index 45d111d26..ed407f406 100644
--- a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json
@@ -167,7 +167,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://cloudasset.googleapis.com/",
"schemas": {
"AnalyzeIamPolicyLongrunningMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json
index 418877065..929c52e39 100644
--- a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json
@@ -521,7 +521,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://cloudbilling.googleapis.com/",
"schemas": {
"AggregationInfo": {
diff --git a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json
index 2e889f48a..6eaf28521 100644
--- a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json
@@ -1589,7 +1589,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://cloudchannel.googleapis.com/",
"schemas": {
"GoogleCloudChannelV1ActivateEntitlementRequest": {
diff --git a/googleapiclient/discovery_cache/documents/clouddebugger.v2.json b/googleapiclient/discovery_cache/documents/clouddebugger.v2.json
index 774d61890..f16deab7c 100644
--- a/googleapiclient/discovery_cache/documents/clouddebugger.v2.json
+++ b/googleapiclient/discovery_cache/documents/clouddebugger.v2.json
@@ -448,7 +448,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://clouddebugger.googleapis.com/",
"schemas": {
"AliasContext": {
diff --git a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json
index d5f968856..08d606f63 100644
--- a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json
@@ -430,7 +430,7 @@
}
}
},
- "revision": "20210826",
+ "revision": "20210916",
"rootUrl": "https://clouderrorreporting.googleapis.com/",
"schemas": {
"DeleteEventsResponse": {
@@ -676,7 +676,7 @@
"description": "Optional. A description of the context in which the error occurred."
},
"eventTime": {
- "description": "Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system will be used.",
+ "description": "Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system is used. If provided, the time must not exceed the [logs retention period](https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, or be more than 24 hours in the future. If an invalid time is provided, then an error is returned.",
"format": "google-datetime",
"type": "string"
},
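A sketch honoring the tightened `eventTime` contract described above; the project and message are placeholders:

    from datetime import datetime, timezone
    from googleapiclient.discovery import build

    er = build("clouderrorreporting", "v1beta1")
    event = {
        # Must not predate the logs retention period, nor sit more than
        # 24 hours in the future; "now" is always safe.
        "eventTime": datetime.now(timezone.utc).isoformat(),
        "serviceContext": {"service": "my-service"},
        "message": "RuntimeError: widget exploded\n  at main()",
    }
    er.projects().events().report(
        projectName="projects/my-project", body=event
    ).execute()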
diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json
index f2cc77084..b2b8e4a96 100644
--- a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json
@@ -546,7 +546,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210916",
"rootUrl": "https://cloudfunctions.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json
index 15b834ee4..9323bdce7 100644
--- a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json
@@ -1273,7 +1273,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210920",
"rootUrl": "https://cloudidentity.googleapis.com/",
"schemas": {
"CheckTransitiveMembershipResponse": {
@@ -1335,7 +1335,7 @@
"id": "DynamicGroupQuery",
"properties": {
"query": {
- "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')`",
+ "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')`",
"type": "string"
},
"resourceType": {
diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json
index fe419da32..8c1e6d62f 100644
--- a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json
@@ -1336,7 +1336,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210920",
"rootUrl": "https://cloudidentity.googleapis.com/",
"schemas": {
"AndroidAttributes": {
@@ -1943,7 +1943,7 @@
"id": "DynamicGroupQuery",
"properties": {
"query": {
- "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')`",
+ "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')`",
"type": "string"
},
"resourceType": {
diff --git a/googleapiclient/discovery_cache/documents/cloudiot.v1.json b/googleapiclient/discovery_cache/documents/cloudiot.v1.json
index 57ed2b474..e4f221b81 100644
--- a/googleapiclient/discovery_cache/documents/cloudiot.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudiot.v1.json
@@ -938,7 +938,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210913",
"rootUrl": "https://cloudiot.googleapis.com/",
"schemas": {
"BindDeviceToGatewayRequest": {
diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json
index 7dbc5e06c..eed87b9cf 100644
--- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json
@@ -1346,7 +1346,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210921",
"rootUrl": "https://cloudkms.googleapis.com/",
"schemas": {
"AsymmetricDecryptRequest": {
diff --git a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json
index d7486213a..cb5ff922b 100644
--- a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json
@@ -216,7 +216,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210919",
"rootUrl": "https://cloudprofiler.googleapis.com/",
"schemas": {
"CreateProfileRequest": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json
index 8fc4a6676..ff73093c6 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json
@@ -1171,7 +1171,7 @@
}
}
},
- "revision": "20210905",
+ "revision": "20210917",
"rootUrl": "https://cloudresourcemanager.googleapis.com/",
"schemas": {
"Ancestor": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json
index 8934404a7..660f902f3 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json
@@ -566,7 +566,7 @@
}
}
},
- "revision": "20210905",
+ "revision": "20210917",
"rootUrl": "https://cloudresourcemanager.googleapis.com/",
"schemas": {
"Ancestor": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json
index f1d7f539d..7f53ee76b 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json
@@ -450,7 +450,7 @@
}
}
},
- "revision": "20210905",
+ "revision": "20210917",
"rootUrl": "https://cloudresourcemanager.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json
index 42b546d6e..4416b882c 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json
@@ -450,7 +450,7 @@
}
}
},
- "revision": "20210905",
+ "revision": "20210917",
"rootUrl": "https://cloudresourcemanager.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json
index 45f8f6025..4aa7fec84 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json
@@ -1612,7 +1612,7 @@
}
}
},
- "revision": "20210905",
+ "revision": "20210917",
"rootUrl": "https://cloudresourcemanager.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json
index 6903a36df..f410eac4b 100644
--- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json
@@ -418,7 +418,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210914",
"rootUrl": "https://cloudscheduler.googleapis.com/",
"schemas": {
"AppEngineHttpTarget": {
diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json
index bf3416ac6..5dcb9267b 100644
--- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json
@@ -433,7 +433,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210914",
"rootUrl": "https://cloudscheduler.googleapis.com/",
"schemas": {
"AppEngineHttpTarget": {
diff --git a/googleapiclient/discovery_cache/documents/cloudshell.v1.json b/googleapiclient/discovery_cache/documents/cloudshell.v1.json
index 2327cdc6f..08b6888b2 100644
--- a/googleapiclient/discovery_cache/documents/cloudshell.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudshell.v1.json
@@ -374,7 +374,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://cloudshell.googleapis.com/",
"schemas": {
"AddPublicKeyMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json
index f078e14af..9bc6c130b 100644
--- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json
+++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json
@@ -809,7 +809,7 @@
}
}
},
- "revision": "20210830",
+ "revision": "20210920",
"rootUrl": "https://cloudtasks.googleapis.com/",
"schemas": {
"AcknowledgeTaskRequest": {
diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json
index 9fbcfe4a9..19642242e 100644
--- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json
+++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json
@@ -697,7 +697,7 @@
}
}
},
- "revision": "20210830",
+ "revision": "20210920",
"rootUrl": "https://cloudtasks.googleapis.com/",
"schemas": {
"AppEngineHttpQueue": {
diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json
index 6745d4e3d..3dfe5365e 100644
--- a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json
@@ -257,7 +257,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210917",
"rootUrl": "https://cloudtrace.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json
index 5ea0fee34..44153725f 100644
--- a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json
@@ -181,7 +181,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210917",
"rootUrl": "https://cloudtrace.googleapis.com/",
"schemas": {
"Annotation": {
diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json
index 7ade44891..53c03720b 100644
--- a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json
@@ -273,7 +273,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210917",
"rootUrl": "https://cloudtrace.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/composer.v1.json b/googleapiclient/discovery_cache/documents/composer.v1.json
index b628aac97..392bf3c13 100644
--- a/googleapiclient/discovery_cache/documents/composer.v1.json
+++ b/googleapiclient/discovery_cache/documents/composer.v1.json
@@ -242,7 +242,7 @@
"type": "string"
},
"updateMask": {
-          "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.numpy\". The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * `config.webServerNetworkAccessControl` * Replace the environment's current `WebServerNetworkAccessControl`. * `config.databaseConfig` * Replace the environment's current `DatabaseConfig`. * `config.webServerConfig` * Replace the environment's current `WebServerConfig`. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.",
+          "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.numpy\". The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.webServerNetworkAccessControl` * Replace the environment's current `WebServerNetworkAccessControl`. * `config.databaseConfig` * Replace the environment's current `DatabaseConfig`. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.webServerConfig.machineType` * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. * `config.databaseConfig.machineType` * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"format": "google-fieldmask",
"location": "query",
"type": "string"
@@ -406,7 +406,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210922",
"rootUrl": "https://composer.googleapis.com/",
"schemas": {
"AllowedIpRange": {
@@ -468,7 +468,7 @@
"type": "object"
},
"DatabaseConfig": {
- "description": "The configuration of Cloud SQL instance that is used by the Apache Airflow software.",
+ "description": "The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"id": "DatabaseConfig",
"properties": {
"machineType": {
@@ -507,7 +507,7 @@
"type": "object"
},
"EncryptionConfig": {
- "description": "The encryption options for the Cloud Composer environment and its dependencies.",
+      "description": "The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"id": "EncryptionConfig",
"properties": {
"kmsKeyName": {
@@ -587,11 +587,11 @@
},
"databaseConfig": {
"$ref": "DatabaseConfig",
- "description": "Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software."
+ "description": "Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*."
},
"encryptionConfig": {
"$ref": "EncryptionConfig",
- "description": "Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated."
+ "description": "Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*."
},
"gkeCluster": {
"description": "Output only. The Kubernetes Engine cluster used to run this environment.",
@@ -602,7 +602,7 @@
"description": "The configuration used for the Kubernetes Engine cluster."
},
"nodeCount": {
- "description": "The number of nodes in the Kubernetes Engine cluster that will be used to run this environment.",
+ "description": "The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"format": "int32",
"type": "integer"
},
@@ -620,7 +620,7 @@
},
"webServerNetworkAccessControl": {
"$ref": "WebServerNetworkAccessControl",
- "description": "Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied."
+ "description": "Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*."
}
},
"type": "object"
@@ -630,23 +630,23 @@
"id": "IPAllocationPolicy",
"properties": {
"clusterIpv4CidrBlock": {
- "description": "Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.",
+ "description": "Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.",
"type": "string"
},
"clusterSecondaryRangeName": {
- "description": "Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true.",
+ "description": "Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.",
"type": "string"
},
"servicesIpv4CidrBlock": {
- "description": "Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.",
+ "description": "Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.",
"type": "string"
},
"servicesSecondaryRangeName": {
- "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true.",
+ "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.",
"type": "string"
},
"useIpAliases": {
- "description": "Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created.",
+ "description": "Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.",
"type": "boolean"
}
},
@@ -745,7 +745,7 @@
"id": "NodeConfig",
"properties": {
"diskSizeGb": {
- "description": "Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated.",
+ "description": "Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"format": "int32",
"type": "integer"
},
@@ -754,11 +754,11 @@
"description": "Optional. The configuration for controlling how IPs are allocated in the GKE cluster."
},
"location": {
- "description": "Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}\". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field.",
+ "description": "Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}\". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"type": "string"
},
"machineType": {
- "description": "Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}\". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to \"n1-standard-1\".",
+ "description": "Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}\". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to \"n1-standard-1\". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"type": "string"
},
"network": {
@@ -766,7 +766,7 @@
"type": "string"
},
"oauthScopes": {
- "description": "Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [\"https://www.googleapis.com/auth/cloud-platform\"]. Cannot be updated.",
+ "description": "Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [\"https://www.googleapis.com/auth/cloud-platform\"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"items": {
"type": "string"
},
@@ -781,7 +781,7 @@
"type": "string"
},
"tags": {
- "description": "Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated.",
+ "description": "Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"items": {
"type": "string"
},
@@ -917,7 +917,7 @@
"type": "string"
},
"enablePrivateEnvironment": {
- "description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true.",
+ "description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"type": "boolean"
},
"privateClusterConfig": {
@@ -925,11 +925,11 @@
"description": "Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment."
},
"webServerIpv4CidrBlock": {
- "description": "Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`.",
+ "description": "Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"type": "string"
},
"webServerIpv4ReservedRange": {
- "description": "Output only. The IP range reserved for the tenant project's App Engine VMs.",
+ "description": "Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"readOnly": true,
"type": "string"
}
@@ -966,7 +966,7 @@
"type": "object"
},
"pythonVersion": {
- "description": "Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated.",
+ "description": "Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.",
"type": "string"
},
"schedulerCount": {
@@ -1005,7 +1005,7 @@
"type": "object"
},
"WebServerConfig": {
- "description": "The configuration settings for the Airflow web server App Engine instance.",
+      "description": "The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"id": "WebServerConfig",
"properties": {
"machineType": {
@@ -1016,7 +1016,7 @@
"type": "object"
},
"WebServerNetworkAccessControl": {
- "description": "Network-level access control policy for the Airflow web server.",
+ "description": "Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.",
"id": "WebServerNetworkAccessControl",
"properties": {
"allowedIpRanges": {
diff --git a/googleapiclient/discovery_cache/documents/composer.v1beta1.json b/googleapiclient/discovery_cache/documents/composer.v1beta1.json
index 68996e283..7dc3c521c 100644
--- a/googleapiclient/discovery_cache/documents/composer.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/composer.v1beta1.json
@@ -462,7 +462,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210922",
"rootUrl": "https://composer.googleapis.com/",
"schemas": {
"AllowedIpRange": {
@@ -727,19 +727,19 @@
"id": "IPAllocationPolicy",
"properties": {
"clusterIpv4CidrBlock": {
- "description": "Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.",
+ "description": "Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.",
"type": "string"
},
"clusterSecondaryRangeName": {
- "description": "Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.",
+ "description": "Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.",
"type": "string"
},
"servicesIpv4CidrBlock": {
- "description": "Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.",
+ "description": "Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.",
"type": "string"
},
"servicesSecondaryRangeName": {
- "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.",
+ "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.",
"type": "string"
},
"useIpAliases": {
diff --git a/googleapiclient/discovery_cache/documents/compute.alpha.json b/googleapiclient/discovery_cache/documents/compute.alpha.json
index 535837c13..de25756ec 100644
--- a/googleapiclient/discovery_cache/documents/compute.alpha.json
+++ b/googleapiclient/discovery_cache/documents/compute.alpha.json
@@ -37874,7 +37874,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210916",
"rootUrl": "https://compute.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json
index 36bddff6e..1d03f83b6 100644
--- a/googleapiclient/discovery_cache/documents/compute.beta.json
+++ b/googleapiclient/discovery_cache/documents/compute.beta.json
@@ -32511,7 +32511,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210916",
"rootUrl": "https://compute.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
@@ -35060,6 +35060,7 @@
"description": "Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer.",
"enum": [
"EXTERNAL",
+ "EXTERNAL_MANAGED",
"INTERNAL",
"INTERNAL_MANAGED",
"INTERNAL_SELF_MANAGED",
@@ -35067,6 +35068,7 @@
],
"enumDescriptions": [
"Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing",
+ "Signifies that this will be used for External Managed HTTP(S), SSL Proxy, or TCP Proxy Load Balancing.",
"Signifies that this will be used for Internal TCP/UDP Load Balancing.",
"Signifies that this will be used for Internal HTTP(S) Load Balancing.",
"Signifies that this will be used by Traffic Director.",
@@ -39257,6 +39259,7 @@
"description": "Specifies the forwarding rule type. For more information about forwarding rules, refer to Forwarding rule concepts.",
"enum": [
"EXTERNAL",
+ "EXTERNAL_MANAGED",
"INTERNAL",
"INTERNAL_MANAGED",
"INTERNAL_SELF_MANAGED",
@@ -39267,6 +39270,7 @@
"",
"",
"",
+ "",
""
],
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/compute.v1.json b/googleapiclient/discovery_cache/documents/compute.v1.json
index 9537e91a7..37b868e00 100644
--- a/googleapiclient/discovery_cache/documents/compute.v1.json
+++ b/googleapiclient/discovery_cache/documents/compute.v1.json
@@ -28278,7 +28278,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210916",
"rootUrl": "https://compute.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json
index 53065bd8a..2d2615ba7 100644
--- a/googleapiclient/discovery_cache/documents/container.v1.json
+++ b/googleapiclient/discovery_cache/documents/container.v1.json
@@ -2459,7 +2459,7 @@
}
}
},
- "revision": "20210902",
+ "revision": "20210910",
"rootUrl": "https://container.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
@@ -4291,12 +4291,12 @@
"type": "boolean"
},
"maxNodeCount": {
- "description": "Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.",
+ "description": "Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.",
"format": "int32",
"type": "integer"
},
"minNodeCount": {
- "description": "Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.",
+ "description": "Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.",
"format": "int32",
"type": "integer"
}
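
The reworded autoscaling descriptions make the bounds per location rather than per pool, so a regional node pool spread over three zones can grow to three times `maxNodeCount`. A sketch against the v1 surface with placeholder names:

```python
from googleapiclient.discovery import build

container = build("container", "v1")

name = ("projects/my-project/locations/us-central1/"
        "clusters/my-cluster/nodePools/default-pool")

# minNodeCount and maxNodeCount bound each location (zone) individually,
# per the clarified wording above.
operation = (
    container.projects().locations().clusters().nodePools()
    .setAutoscaling(
        name=name,
        body={"autoscaling": {
            "enabled": True,
            "minNodeCount": 1,
            "maxNodeCount": 5,
        }},
    )
    .execute()
)
```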
diff --git a/googleapiclient/discovery_cache/documents/container.v1beta1.json b/googleapiclient/discovery_cache/documents/container.v1beta1.json
index f69239fde..20854492a 100644
--- a/googleapiclient/discovery_cache/documents/container.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/container.v1beta1.json
@@ -2484,7 +2484,7 @@
}
}
},
- "revision": "20210902",
+ "revision": "20210906",
"rootUrl": "https://container.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
@@ -4652,12 +4652,12 @@
"type": "boolean"
},
"maxNodeCount": {
- "description": "Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.",
+ "description": "Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.",
"format": "int32",
"type": "integer"
},
"minNodeCount": {
- "description": "Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.",
+ "description": "Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.",
"format": "int32",
"type": "integer"
}
diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json
index 2b6923400..fe15c2a0b 100644
--- a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json
@@ -1229,7 +1229,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210917",
"rootUrl": "https://containeranalysis.googleapis.com/",
"schemas": {
"Artifact": {
diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json
index 5945d060d..b93cd9a67 100644
--- a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json
@@ -853,7 +853,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210917",
"rootUrl": "https://containeranalysis.googleapis.com/",
"schemas": {
"AliasContext": {
diff --git a/googleapiclient/discovery_cache/documents/content.v2.1.json b/googleapiclient/discovery_cache/documents/content.v2.1.json
index 7b92f650d..67df490ba 100644
--- a/googleapiclient/discovery_cache/documents/content.v2.1.json
+++ b/googleapiclient/discovery_cache/documents/content.v2.1.json
@@ -3067,7 +3067,7 @@
]
},
"captureOrder": {
-      "description": "Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped.",
+      "description": "Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped. A rejected error code is returned when the payment service provider has declined the charge. This indicates a problem between the PSP and either the merchant's or customer's account. Sometimes this error will be resolved by the customer. We recommend retrying these errors once per day or cancelling the order with reason `failedToCaptureFunds` if the items cannot be held.",
"flatPath": "{merchantId}/orders/{orderId}/captureOrder",
"httpMethod": "POST",
"id": "content.orders.captureOrder",
@@ -5783,7 +5783,7 @@
}
}
},
- "revision": "20210916",
+ "revision": "20210924",
"rootUrl": "https://shoppingcontent.googleapis.com/",
"schemas": {
"Account": {
@@ -5930,7 +5930,7 @@
"type": "string"
},
"phoneNumber": {
- "description": "The phone number of the business.",
+          "description": "The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number, use the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.",
"type": "string"
}
},
@@ -6517,6 +6517,10 @@
"description": "Action to perform for this link. The `\"request\"` action is only available to select merchants. Acceptable values are: - \"`approve`\" - \"`remove`\" - \"`request`\" ",
"type": "string"
},
+ "eCommercePlatformLinkInfo": {
+ "$ref": "ECommercePlatformLinkInfo",
+ "description": "Additional information required for `eCommercePlatform` link type."
+ },
"linkType": {
"description": "Type of the link between the two accounts. Acceptable values are: - \"`channelPartner`\" - \"`eCommercePlatform`\" - \"`paymentServiceProvider`\" ",
"type": "string"
@@ -7893,6 +7897,17 @@
},
"type": "object"
},
+ "ECommercePlatformLinkInfo": {
+ "description": "Additional information required for E_COMMERCE_PLATFORM link type.",
+ "id": "ECommercePlatformLinkInfo",
+ "properties": {
+ "externalAccountId": {
+ "description": "The id used by the third party service provider to identify the merchant.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"Error": {
"description": "An error returned by the API.",
"id": "Error",
@@ -8849,7 +8864,7 @@
"type": "string"
},
"pickupMethod": {
- "description": "Supported pickup method for this offer. Unless the value is \"not supported\", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification.",
+ "description": "Supported pickup method for this offer. Unless the value is \"not supported\", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification.",
"type": "string"
},
"pickupSla": {
@@ -9433,7 +9448,7 @@
"type": "integer"
},
"reason": {
- "description": "The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - \"`autoPostInternal`\" - \"`autoPostInvalidBillingAddress`\" - \"`autoPostNoInventory`\" - \"`autoPostPriceError`\" - \"`autoPostUndeliverableShippingAddress`\" - \"`couponAbuse`\" - \"`customerCanceled`\" - \"`customerInitiatedCancel`\" - \"`customerSupportRequested`\" - \"`failToPushOrderGoogleError`\" - \"`failToPushOrderMerchantError`\" - \"`failToPushOrderMerchantFulfillmentError`\" - \"`failToPushOrderToMerchant`\" - \"`failToPushOrderToMerchantOutOfStock`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`merchantDidNotShipOnTime`\" - \"`noInventory`\" - \"`orderTimeout`\" - \"`other`\" - \"`paymentAbuse`\" - \"`paymentDeclined`\" - \"`priceError`\" - \"`returnRefundAbuse`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" ",
+ "description": "The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - \"`autoPostInternal`\" - \"`autoPostInvalidBillingAddress`\" - \"`autoPostNoInventory`\" - \"`autoPostPriceError`\" - \"`autoPostUndeliverableShippingAddress`\" - \"`couponAbuse`\" - \"`customerCanceled`\" - \"`customerInitiatedCancel`\" - \"`customerSupportRequested`\" - \"`failToPushOrderGoogleError`\" - \"`failToPushOrderMerchantError`\" - \"`failToPushOrderMerchantFulfillmentError`\" - \"`failToPushOrderToMerchant`\" - \"`failToPushOrderToMerchantOutOfStock`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`merchantDidNotShipOnTime`\" - \"`noInventory`\" - \"`orderTimeout`\" - \"`other`\" - \"`paymentAbuse`\" - \"`paymentDeclined`\" - \"`priceError`\" - \"`returnRefundAbuse`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" - \"`failedToCaptureFunds`\" ",
"type": "string"
},
"reasonText": {
@@ -10745,7 +10760,7 @@
"type": "integer"
},
"reason": {
- "description": "The reason for the cancellation. Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" ",
+ "description": "The reason for the cancellation. Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" - \"`failedToCaptureFunds`\" ",
"type": "string"
},
"reasonText": {
@@ -10777,7 +10792,7 @@
"type": "string"
},
"reason": {
- "description": "The reason for the cancellation. Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" ",
+ "description": "The reason for the cancellation. Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" - \"`failedToCaptureFunds`\" ",
"type": "string"
},
"reasonText": {
@@ -13184,6 +13199,20 @@
],
"type": "string"
},
+ "productType": {
+ "description": "Product filter by product type for the promotion.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "productTypeExclusion": {
+ "description": "Product filter by product type exclusion for the promotion.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
"promotionDestinationIds": {
"description": "Destination ID for the promotion.",
"items": {
diff --git a/googleapiclient/discovery_cache/documents/content.v2.json b/googleapiclient/discovery_cache/documents/content.v2.json
index 3f28ec956..e504b7e60 100644
--- a/googleapiclient/discovery_cache/documents/content.v2.json
+++ b/googleapiclient/discovery_cache/documents/content.v2.json
@@ -3298,7 +3298,7 @@
}
}
},
- "revision": "20210916",
+ "revision": "20210924",
"rootUrl": "https://shoppingcontent.googleapis.com/",
"schemas": {
"Account": {
diff --git a/googleapiclient/discovery_cache/documents/customsearch.v1.json b/googleapiclient/discovery_cache/documents/customsearch.v1.json
index 46f1c5082..b23d33429 100644
--- a/googleapiclient/discovery_cache/documents/customsearch.v1.json
+++ b/googleapiclient/discovery_cache/documents/customsearch.v1.json
@@ -674,7 +674,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://customsearch.googleapis.com/",
"schemas": {
"Promotion": {
diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1.json
index 14d3cdeb3..81612f580 100644
--- a/googleapiclient/discovery_cache/documents/datacatalog.v1.json
+++ b/googleapiclient/discovery_cache/documents/datacatalog.v1.json
@@ -1841,7 +1841,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210910",
"rootUrl": "https://datacatalog.googleapis.com/",
"schemas": {
"Binding": {
diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json
index 267603efa..ecb49ebed 100644
--- a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json
@@ -1808,7 +1808,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210910",
"rootUrl": "https://datacatalog.googleapis.com/",
"schemas": {
"Binding": {
diff --git a/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json b/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json
index 363c85a7c..2476acea9 100644
--- a/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json
@@ -1596,7 +1596,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210916",
"rootUrl": "https://datalabeling.googleapis.com/",
"schemas": {
"GoogleCloudDatalabelingV1alpha1CreateInstructionMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json
index 5f9bb257e..82c153009 100644
--- a/googleapiclient/discovery_cache/documents/datamigration.v1.json
+++ b/googleapiclient/discovery_cache/documents/datamigration.v1.json
@@ -1049,7 +1049,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210915",
"rootUrl": "https://datamigration.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json
index 2d00e1ac8..4706561a4 100644
--- a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json
@@ -1049,7 +1049,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210915",
"rootUrl": "https://datamigration.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json
index 2e607b4c4..88808bab2 100644
--- a/googleapiclient/discovery_cache/documents/dataproc.v1.json
+++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json
@@ -2316,7 +2316,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210920",
"rootUrl": "https://dataproc.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
diff --git a/googleapiclient/discovery_cache/documents/datastore.v1.json b/googleapiclient/discovery_cache/documents/datastore.v1.json
index 281fe6d28..faa7b42b3 100644
--- a/googleapiclient/discovery_cache/documents/datastore.v1.json
+++ b/googleapiclient/discovery_cache/documents/datastore.v1.json
@@ -626,7 +626,7 @@
}
}
},
- "revision": "20210901",
+ "revision": "20210916",
"rootUrl": "https://datastore.googleapis.com/",
"schemas": {
"AllocateIdsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/datastore.v1beta1.json b/googleapiclient/discovery_cache/documents/datastore.v1beta1.json
index c699ef2f6..dea83d4d5 100644
--- a/googleapiclient/discovery_cache/documents/datastore.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/datastore.v1beta1.json
@@ -168,7 +168,7 @@
}
}
},
- "revision": "20210901",
+ "revision": "20210916",
"rootUrl": "https://datastore.googleapis.com/",
"schemas": {
"GoogleDatastoreAdminV1CommonMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/datastore.v1beta3.json b/googleapiclient/discovery_cache/documents/datastore.v1beta3.json
index b417a07d9..05a2049c8 100644
--- a/googleapiclient/discovery_cache/documents/datastore.v1beta3.json
+++ b/googleapiclient/discovery_cache/documents/datastore.v1beta3.json
@@ -308,7 +308,7 @@
}
}
},
- "revision": "20210901",
+ "revision": "20210916",
"rootUrl": "https://datastore.googleapis.com/",
"schemas": {
"AllocateIdsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json b/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json
index 66b183615..18a676437 100644
--- a/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json
+++ b/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json
@@ -1588,7 +1588,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210923",
"rootUrl": "https://deploymentmanager.googleapis.com/",
"schemas": {
"AsyncOptions": {
diff --git a/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json b/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json
index c4104ed83..359808c76 100644
--- a/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json
+++ b/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json
@@ -988,7 +988,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210923",
"rootUrl": "https://deploymentmanager.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json b/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json
index 3649648e5..4b193f1f1 100644
--- a/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json
+++ b/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json
@@ -1552,7 +1552,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210923",
"rootUrl": "https://deploymentmanager.googleapis.com/",
"schemas": {
"AsyncOptions": {
diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2.json b/googleapiclient/discovery_cache/documents/dialogflow.v2.json
index 87a96c55c..5924ea877 100644
--- a/googleapiclient/discovery_cache/documents/dialogflow.v2.json
+++ b/googleapiclient/discovery_cache/documents/dialogflow.v2.json
@@ -6983,7 +6983,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210918",
"rootUrl": "https://dialogflow.googleapis.com/",
"schemas": {
"GoogleCloudDialogflowCxV3AudioInput": {
@@ -7192,6 +7192,35 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3DtmfInput": {
"description": "Represents the input for dtmf event.",
"id": "GoogleCloudDialogflowCxV3DtmfInput",
@@ -7207,6 +7236,75 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3Environment": {
+ "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.",
+ "id": "GoogleCloudDialogflowCxV3Environment",
+ "properties": {
+ "description": {
+ "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
+ "type": "string"
+ },
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
+ "updateTime": {
+ "description": "Output only. Update time of this environment.",
+ "format": "google-datetime",
+ "readOnly": true,
+ "type": "string"
+ },
+ "versionConfigs": {
+ "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3EnvironmentVersionConfig": {
+ "description": "Configuration for the version.",
+ "id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig",
+ "properties": {
+ "version": {
+ "description": "Required. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3EventHandler": {
"description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.",
"id": "GoogleCloudDialogflowCxV3EventHandler",
@@ -8763,6 +8861,35 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1DtmfInput": {
"description": "Represents the input for dtmf event.",
"id": "GoogleCloudDialogflowCxV3beta1DtmfInput",
@@ -8778,6 +8905,75 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1Environment": {
+ "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.",
+ "id": "GoogleCloudDialogflowCxV3beta1Environment",
+ "properties": {
+ "description": {
+ "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
+ "type": "string"
+ },
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
+ "updateTime": {
+ "description": "Output only. Update time of this environment.",
+ "format": "google-datetime",
+ "readOnly": true,
+ "type": "string"
+ },
+ "versionConfigs": {
+ "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": {
+ "description": "Configuration for the version.",
+ "id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig",
+ "properties": {
+ "version": {
+ "description": "Required. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1EventHandler": {
"description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.",
"id": "GoogleCloudDialogflowCxV3beta1EventHandler",
diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json
index 9e1b41dc3..7bc18fd16 100644
--- a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json
+++ b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json
@@ -7315,7 +7315,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210918",
"rootUrl": "https://dialogflow.googleapis.com/",
"schemas": {
"GoogleCloudDialogflowCxV3AudioInput": {
@@ -7524,6 +7524,35 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version Deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3DtmfInput": {
"description": "Represents the input for dtmf event.",
"id": "GoogleCloudDialogflowCxV3DtmfInput",
@@ -7539,6 +7568,75 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3Environment": {
+ "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.",
+ "id": "GoogleCloudDialogflowCxV3Environment",
+ "properties": {
+ "description": {
+ "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
+ "type": "string"
+ },
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
+ "updateTime": {
+ "description": "Output only. Update time of this environment.",
+ "format": "google-datetime",
+ "readOnly": true,
+ "type": "string"
+ },
+ "versionConfigs": {
+ "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3EnvironmentVersionConfig": {
+ "description": "Configuration for the version.",
+ "id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig",
+ "properties": {
+ "version": {
+ "description": "Required. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3EventHandler": {
"description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.",
"id": "GoogleCloudDialogflowCxV3EventHandler",
@@ -9095,6 +9193,35 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1DtmfInput": {
"description": "Represents the input for dtmf event.",
"id": "GoogleCloudDialogflowCxV3beta1DtmfInput",
@@ -9110,6 +9237,75 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1Environment": {
+ "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.",
+ "id": "GoogleCloudDialogflowCxV3beta1Environment",
+ "properties": {
+ "description": {
+ "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
+ "type": "string"
+ },
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
+ "updateTime": {
+ "description": "Output only. Update time of this environment.",
+ "format": "google-datetime",
+ "readOnly": true,
+ "type": "string"
+ },
+ "versionConfigs": {
+ "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": {
+ "description": "Configuration for the version.",
+ "id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig",
+ "properties": {
+ "version": {
+ "description": "Required. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1EventHandler": {
"description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.",
"id": "GoogleCloudDialogflowCxV3beta1EventHandler",
@@ -15987,6 +16183,10 @@
"description": "Returns a response containing a custom, platform-specific payload.",
"type": "object"
},
+ "telephonyTransferCall": {
+ "$ref": "GoogleCloudDialogflowV2beta1ResponseMessageTelephonyTransferCall",
+ "description": "A signal that the client should transfer the phone call connected to this agent to a third-party endpoint."
+ },
"text": {
"$ref": "GoogleCloudDialogflowV2beta1ResponseMessageText",
"description": "Returns a text response."
@@ -16015,6 +16215,21 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowV2beta1ResponseMessageTelephonyTransferCall": {
+      "description": "Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint.",
+ "id": "GoogleCloudDialogflowV2beta1ResponseMessageTelephonyTransferCall",
+ "properties": {
+ "phoneNumber": {
+ "description": "Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).",
+ "type": "string"
+ },
+ "sipUri": {
+ "description": "Transfer the call to a SIP endpoint.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowV2beta1ResponseMessageText": {
"description": "The text response message.",
"id": "GoogleCloudDialogflowV2beta1ResponseMessageText",
diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3.json b/googleapiclient/discovery_cache/documents/dialogflow.v3.json
index 740654b1b..2686ea111 100644
--- a/googleapiclient/discovery_cache/documents/dialogflow.v3.json
+++ b/googleapiclient/discovery_cache/documents/dialogflow.v3.json
@@ -696,6 +696,35 @@
"https://www.googleapis.com/auth/dialogflow"
]
},
+ "deployFlow": {
+ "description": "Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse",
+ "flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}:deployFlow",
+ "httpMethod": "POST",
+ "id": "dialogflow.projects.locations.agents.environments.deployFlow",
+ "parameterOrder": [
+ "environment"
+ ],
+ "parameters": {
+ "environment": {
+              "description": "Required. The environment to deploy the flow to. Format: `projects//locations//agents//environments/`.",
+ "location": "path",
+ "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v3/{+environment}:deployFlow",
+ "request": {
+ "$ref": "GoogleCloudDialogflowCxV3DeployFlowRequest"
+ },
+ "response": {
+ "$ref": "GoogleLongrunningOperation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/dialogflow"
+ ]
+ },
"get": {
"description": "Retrieves the specified Environment.",
"flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}",
@@ -903,6 +932,73 @@
}
}
},
+ "deployments": {
+ "methods": {
+ "get": {
+ "description": "Retrieves the specified Deployment.",
+ "flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments/{deploymentsId}",
+ "httpMethod": "GET",
+ "id": "dialogflow.projects.locations.agents.environments.deployments.get",
+ "parameterOrder": [
+ "name"
+ ],
+ "parameters": {
+ "name": {
+ "description": "Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "location": "path",
+ "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+/deployments/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v3/{+name}",
+ "response": {
+ "$ref": "GoogleCloudDialogflowCxV3Deployment"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/dialogflow"
+ ]
+ },
+ "list": {
+ "description": "Returns the list of all deployments in the specified Environment.",
+ "flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments",
+ "httpMethod": "GET",
+ "id": "dialogflow.projects.locations.agents.environments.deployments.list",
+ "parameterOrder": [
+ "parent"
+ ],
+ "parameters": {
+ "pageSize": {
+ "description": "The maximum number of items to return in a single page. By default 20 and at most 100.",
+ "format": "int32",
+ "location": "query",
+ "type": "integer"
+ },
+ "pageToken": {
+ "description": "The next_page_token value returned from a previous list request.",
+ "location": "query",
+ "type": "string"
+ },
+ "parent": {
+              "description": "Required. The Environment to list all deployments for. Format: `projects//locations//agents//environments/`.",
+ "location": "path",
+ "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v3/{+parent}/deployments",
+ "response": {
+ "$ref": "GoogleCloudDialogflowCxV3ListDeploymentsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/dialogflow"
+ ]
+ }
+ }
+ },
"experiments": {
"methods": {
"create": {
@@ -3493,7 +3589,7 @@
],
"parameters": {
"name": {
- "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.",
+ "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/securitySettings/[^/]+$",
"required": true,
@@ -3623,7 +3719,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210918",
"rootUrl": "https://dialogflow.googleapis.com/",
"schemas": {
"GoogleCloudDialogflowCxV3AdvancedSettings": {
@@ -3993,6 +4089,109 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3DeployFlowRequest": {
+ "description": "The request message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowRequest",
+ "properties": {
+ "flowVersion": {
+          "description": "Required. The flow version to deploy. Format: `projects//locations//agents//flows//versions/`.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version Deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3Deployment": {
+      "description": "Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure running pre-deployment steps, e.g. running validation test cases, experiment auto-rollout, etc.",
+ "id": "GoogleCloudDialogflowCxV3Deployment",
+ "properties": {
+ "endTime": {
+ "description": "End time of this deployment.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "flowVersion": {
+ "description": "The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the deployment. Format: projects//locations//agents//environments//deployments/.",
+ "type": "string"
+ },
+ "result": {
+ "$ref": "GoogleCloudDialogflowCxV3DeploymentResult",
+ "description": "Result of the deployment."
+ },
+ "startTime": {
+ "description": "Start time of this deployment.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "state": {
+ "description": "The current state of the deployment.",
+ "enum": [
+ "STATE_UNSPECIFIED",
+ "RUNNING",
+ "SUCCEEDED",
+ "FAILED"
+ ],
+ "enumDescriptions": [
+ "State unspecified.",
+ "The deployment is running.",
+ "The deployment succeeded.",
+ "The deployment failed."
+ ],
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3DeploymentResult": {
+ "description": "Result of the deployment.",
+ "id": "GoogleCloudDialogflowCxV3DeploymentResult",
+ "properties": {
+ "deploymentTestResults": {
+ "description": "Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "experiment": {
+ "description": "The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3DetectIntentRequest": {
"description": "The request to detect user's intent.",
"id": "GoogleCloudDialogflowCxV3DetectIntentRequest",
@@ -4179,6 +4378,10 @@
"description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
"type": "string"
},
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
"updateTime": {
"description": "Output only. Update time of this environment.",
"format": "google-datetime",
@@ -4195,6 +4398,28 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3EnvironmentVersionConfig": {
"description": "Configuration for the version.",
"id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig",
@@ -5263,6 +5488,24 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3ListDeploymentsResponse": {
+ "description": "The response message for Deployments.ListDeployments.",
+ "id": "GoogleCloudDialogflowCxV3ListDeploymentsResponse",
+ "properties": {
+ "deployments": {
+ "description": "The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn't the last page.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3Deployment"
+ },
+ "type": "array"
+ },
+ "nextPageToken": {
+ "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3ListEntityTypesResponse": {
"description": "The response message for EntityTypes.ListEntityTypes.",
"id": "GoogleCloudDialogflowCxV3ListEntityTypesResponse",
@@ -6372,7 +6615,7 @@
"type": "string"
},
"name": {
- "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.",
+ "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.",
"type": "string"
},
"purgeDataTypes": {
@@ -7632,6 +7875,35 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1DtmfInput": {
"description": "Represents the input for dtmf event.",
"id": "GoogleCloudDialogflowCxV3beta1DtmfInput",
@@ -7647,6 +7919,75 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1Environment": {
+ "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.",
+ "id": "GoogleCloudDialogflowCxV3beta1Environment",
+ "properties": {
+ "description": {
+ "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
+ "type": "string"
+ },
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
+ "updateTime": {
+ "description": "Output only. Update time of this environment.",
+ "format": "google-datetime",
+ "readOnly": true,
+ "type": "string"
+ },
+ "versionConfigs": {
+ "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": {
+ "description": "Configuration for the version.",
+ "id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig",
+ "properties": {
+ "version": {
+ "description": "Required. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1EventHandler": {
"description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.",
"id": "GoogleCloudDialogflowCxV3beta1EventHandler",
diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json
index e259f4225..232349a36 100644
--- a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json
+++ b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json
@@ -696,6 +696,35 @@
"https://www.googleapis.com/auth/dialogflow"
]
},
+ "deployFlow": {
+ "description": "Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse",
+ "flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}:deployFlow",
+ "httpMethod": "POST",
+ "id": "dialogflow.projects.locations.agents.environments.deployFlow",
+ "parameterOrder": [
+ "environment"
+ ],
+ "parameters": {
+ "environment": {
+              "description": "Required. The environment to deploy the flow to. Format: `projects//locations//agents//environments/`.",
+ "location": "path",
+ "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v3beta1/{+environment}:deployFlow",
+ "request": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1DeployFlowRequest"
+ },
+ "response": {
+ "$ref": "GoogleLongrunningOperation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/dialogflow"
+ ]
+ },
"get": {
"description": "Retrieves the specified Environment.",
"flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}",
@@ -903,6 +932,73 @@
}
}
},
+ "deployments": {
+ "methods": {
+ "get": {
+ "description": "Retrieves the specified Deployment.",
+ "flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments/{deploymentsId}",
+ "httpMethod": "GET",
+ "id": "dialogflow.projects.locations.agents.environments.deployments.get",
+ "parameterOrder": [
+ "name"
+ ],
+ "parameters": {
+ "name": {
+ "description": "Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "location": "path",
+ "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+/deployments/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v3beta1/{+name}",
+ "response": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1Deployment"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/dialogflow"
+ ]
+ },
+ "list": {
+ "description": "Returns the list of all deployments in the specified Environment.",
+ "flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments",
+ "httpMethod": "GET",
+ "id": "dialogflow.projects.locations.agents.environments.deployments.list",
+ "parameterOrder": [
+ "parent"
+ ],
+ "parameters": {
+ "pageSize": {
+ "description": "The maximum number of items to return in a single page. By default 20 and at most 100.",
+ "format": "int32",
+ "location": "query",
+ "type": "integer"
+ },
+ "pageToken": {
+ "description": "The next_page_token value returned from a previous list request.",
+ "location": "query",
+ "type": "string"
+ },
+ "parent": {
+              "description": "Required. The Environment to list all deployments for. Format: `projects//locations//agents//environments/`.",
+ "location": "path",
+ "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v3beta1/{+parent}/deployments",
+ "response": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1ListDeploymentsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/dialogflow"
+ ]
+ }
+ }
+ },
"experiments": {
"methods": {
"create": {
@@ -3493,7 +3589,7 @@
],
"parameters": {
"name": {
- "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.",
+ "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/securitySettings/[^/]+$",
"required": true,
@@ -3623,7 +3719,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210918",
"rootUrl": "https://dialogflow.googleapis.com/",
"schemas": {
"GoogleCloudDialogflowCxV3AudioInput": {
@@ -3832,6 +3928,35 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version Deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3DtmfInput": {
"description": "Represents the input for dtmf event.",
"id": "GoogleCloudDialogflowCxV3DtmfInput",
@@ -3847,6 +3972,75 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3Environment": {
+ "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.",
+ "id": "GoogleCloudDialogflowCxV3Environment",
+ "properties": {
+ "description": {
+ "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
+ "type": "string"
+ },
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
+ "updateTime": {
+ "description": "Output only. Update time of this environment.",
+ "format": "google-datetime",
+ "readOnly": true,
+ "type": "string"
+ },
+ "versionConfigs": {
+ "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3EnvironmentVersionConfig": {
+ "description": "Configuration for the version.",
+ "id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig",
+ "properties": {
+ "version": {
+ "description": "Required. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3EventHandler": {
"description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.",
"id": "GoogleCloudDialogflowCxV3EventHandler",
@@ -5564,6 +5758,109 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": {
+ "description": "Metadata returned for the Environments.DeployFlow long running operation.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata",
+ "properties": {
+ "testErrors": {
+ "description": "Errors of running deployment tests.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1TestError"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1DeployFlowRequest": {
+ "description": "The request message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowRequest",
+ "properties": {
+ "flowVersion": {
+          "description": "Required. The flow version to deploy. Format: `projects//locations//agents//flows//versions/`.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": {
+ "description": "The response message for Environments.DeployFlow.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse",
+ "properties": {
+ "deployment": {
+          "description": "The name of the flow version deployment. Format: `projects//locations//agents//environments//deployments/`.",
+ "type": "string"
+ },
+ "environment": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1Environment",
+ "description": "The updated environment where the flow is deployed."
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1Deployment": {
+      "description": "Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure running pre-deployment steps, e.g. running validation test cases, experiment auto-rollout, etc.",
+ "id": "GoogleCloudDialogflowCxV3beta1Deployment",
+ "properties": {
+ "endTime": {
+ "description": "End time of this deployment.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "flowVersion": {
+ "description": "The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of the deployment. Format: projects//locations//agents//environments//deployments/.",
+ "type": "string"
+ },
+ "result": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1DeploymentResult",
+ "description": "Result of the deployment."
+ },
+ "startTime": {
+ "description": "Start time of this deployment.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "state": {
+ "description": "The current state of the deployment.",
+ "enum": [
+ "STATE_UNSPECIFIED",
+ "RUNNING",
+ "SUCCEEDED",
+ "FAILED"
+ ],
+ "enumDescriptions": [
+ "State unspecified.",
+ "The deployment is running.",
+ "The deployment succeeded.",
+ "The deployment failed."
+ ],
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudDialogflowCxV3beta1DeploymentResult": {
+ "description": "Result of the deployment.",
+ "id": "GoogleCloudDialogflowCxV3beta1DeploymentResult",
+ "properties": {
+ "deploymentTestResults": {
+ "description": "Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "experiment": {
+ "description": "The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1DetectIntentRequest": {
"description": "The request to detect user's intent.",
"id": "GoogleCloudDialogflowCxV3beta1DetectIntentRequest",
@@ -5750,6 +6047,10 @@
"description": "The name of the environment. Format: `projects//locations//agents//environments/`.",
"type": "string"
},
+ "testCasesConfig": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "description": "The test cases config for continuous tests of this environment."
+ },
"updateTime": {
"description": "Output only. Update time of this environment.",
"format": "google-datetime",
@@ -5766,6 +6067,28 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": {
+ "description": "The configuration for continuous tests.",
+ "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig",
+ "properties": {
+ "enableContinuousRun": {
+          "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.",
+ "type": "boolean"
+ },
+ "enablePredeploymentRun": {
+ "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.",
+ "type": "boolean"
+ },
+ "testCases": {
+          "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations//agents//testCases/`",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": {
"description": "Configuration for the version.",
"id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig",
@@ -6834,6 +7157,24 @@
},
"type": "object"
},
+ "GoogleCloudDialogflowCxV3beta1ListDeploymentsResponse": {
+ "description": "The response message for Deployments.ListDeployments.",
+ "id": "GoogleCloudDialogflowCxV3beta1ListDeploymentsResponse",
+ "properties": {
+ "deployments": {
+ "description": "The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn't the last page.",
+ "items": {
+ "$ref": "GoogleCloudDialogflowCxV3beta1Deployment"
+ },
+ "type": "array"
+ },
+ "nextPageToken": {
+ "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudDialogflowCxV3beta1ListEntityTypesResponse": {
"description": "The response message for EntityTypes.ListEntityTypes.",
"id": "GoogleCloudDialogflowCxV3beta1ListEntityTypesResponse",
@@ -7943,7 +8284,7 @@
"type": "string"
},
"name": {
- "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.",
+ "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.",
"type": "string"
},
"purgeDataTypes": {
diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v1.json b/googleapiclient/discovery_cache/documents/displayvideo.v1.json
index cb4d10140..800b12bd3 100644
--- a/googleapiclient/discovery_cache/documents/displayvideo.v1.json
+++ b/googleapiclient/discovery_cache/documents/displayvideo.v1.json
@@ -7317,7 +7317,7 @@
}
}
},
- "revision": "20210920",
+ "revision": "20210923",
"rootUrl": "https://displayvideo.googleapis.com/",
"schemas": {
"ActivateManualTriggerRequest": {
diff --git a/googleapiclient/discovery_cache/documents/dlp.v2.json b/googleapiclient/discovery_cache/documents/dlp.v2.json
index be1cda3e4..302254c48 100644
--- a/googleapiclient/discovery_cache/documents/dlp.v2.json
+++ b/googleapiclient/discovery_cache/documents/dlp.v2.json
@@ -3412,7 +3412,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://dlp.googleapis.com/",
"schemas": {
"GooglePrivacyDlpV2Action": {
diff --git a/googleapiclient/discovery_cache/documents/dns.v1.json b/googleapiclient/discovery_cache/documents/dns.v1.json
index 11c88daac..ebd42260c 100644
--- a/googleapiclient/discovery_cache/documents/dns.v1.json
+++ b/googleapiclient/discovery_cache/documents/dns.v1.json
@@ -1235,7 +1235,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210914",
"rootUrl": "https://dns.googleapis.com/",
"schemas": {
"Change": {
diff --git a/googleapiclient/discovery_cache/documents/dns.v1beta2.json b/googleapiclient/discovery_cache/documents/dns.v1beta2.json
index e4ed232f2..2255a62f9 100644
--- a/googleapiclient/discovery_cache/documents/dns.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/dns.v1beta2.json
@@ -1730,7 +1730,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210914",
"rootUrl": "https://dns.googleapis.com/",
"schemas": {
"Change": {
diff --git a/googleapiclient/discovery_cache/documents/docs.v1.json b/googleapiclient/discovery_cache/documents/docs.v1.json
index fcc108f2f..3020bffc0 100644
--- a/googleapiclient/discovery_cache/documents/docs.v1.json
+++ b/googleapiclient/discovery_cache/documents/docs.v1.json
@@ -216,7 +216,7 @@
}
}
},
- "revision": "20210915",
+ "revision": "20210922",
"rootUrl": "https://docs.googleapis.com/",
"schemas": {
"AutoText": {
diff --git a/googleapiclient/discovery_cache/documents/documentai.v1.json b/googleapiclient/discovery_cache/documents/documentai.v1.json
index b2db14531..b84596730 100644
--- a/googleapiclient/discovery_cache/documents/documentai.v1.json
+++ b/googleapiclient/discovery_cache/documents/documentai.v1.json
@@ -1029,7 +1029,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210926",
"rootUrl": "https://documentai.googleapis.com/",
"schemas": {
"GoogleCloudDocumentaiUiv1beta3BatchDeleteDocumentsMetadata": {
@@ -3145,7 +3145,7 @@
"id": "GoogleCloudDocumentaiV1SchemaEntityType",
"properties": {
"baseType": {
- "description": "Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.",
+          "description": "Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.",
"type": "string"
},
"description": {
diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json
index bff9e1223..455d309aa 100644
--- a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json
@@ -292,7 +292,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210926",
"rootUrl": "https://documentai.googleapis.com/",
"schemas": {
"GoogleCloudDocumentaiUiv1beta3BatchDeleteDocumentsMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json
index 9de9de9d9..526a19c37 100644
--- a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json
+++ b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json
@@ -796,7 +796,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210926",
"rootUrl": "https://documentai.googleapis.com/",
"schemas": {
"GoogleCloudDocumentaiUiv1beta3BatchDeleteDocumentsMetadata": {
@@ -5621,7 +5621,7 @@
"id": "GoogleCloudDocumentaiV1beta3SchemaEntityType",
"properties": {
"baseType": {
- "description": "Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.",
+          "description": "Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.",
"type": "string"
},
"description": {
diff --git a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json
index 34f49e627..058f7fabd 100644
--- a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json
+++ b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json
@@ -289,7 +289,7 @@
}
}
},
- "revision": "20210920",
+ "revision": "20210925",
"rootUrl": "https://domainsrdap.googleapis.com/",
"schemas": {
"HttpBody": {
diff --git a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json
index 499c794be..a3538ef1d 100644
--- a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json
+++ b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json
@@ -280,7 +280,7 @@
}
}
},
- "revision": "20210824",
+ "revision": "20210920",
"rootUrl": "https://doubleclickbidmanager.googleapis.com/",
"schemas": {
"ChannelGrouping": {
@@ -618,7 +618,15 @@
"FILTER_INSERTION_ORDER_GOAL_VALUE",
"FILTER_OMID_CAPABLE",
"FILTER_VENDOR_MEASUREMENT_MODE",
- "FILTER_IMPRESSION_LOSS_REJECTION_REASON"
+ "FILTER_IMPRESSION_LOSS_REJECTION_REASON",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_START",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_FIRST_QUARTILE",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_MID_POINT",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_THIRD_QUARTILE",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_COMPLETE",
+ "FILTER_VERIFICATION_VIDEO_RESIZED",
+ "FILTER_VERIFICATION_AUDIBILITY_START",
+ "FILTER_VERIFICATION_AUDIBILITY_COMPLETE"
],
"enumDescriptions": [
"",
@@ -902,6 +910,14 @@
"",
"",
"",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
""
],
"type": "string"
@@ -1268,7 +1284,15 @@
"FILTER_INSERTION_ORDER_GOAL_VALUE",
"FILTER_OMID_CAPABLE",
"FILTER_VENDOR_MEASUREMENT_MODE",
- "FILTER_IMPRESSION_LOSS_REJECTION_REASON"
+ "FILTER_IMPRESSION_LOSS_REJECTION_REASON",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_START",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_FIRST_QUARTILE",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_MID_POINT",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_THIRD_QUARTILE",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_COMPLETE",
+ "FILTER_VERIFICATION_VIDEO_RESIZED",
+ "FILTER_VERIFICATION_AUDIBILITY_START",
+ "FILTER_VERIFICATION_AUDIBILITY_COMPLETE"
],
"enumDescriptions": [
"",
@@ -1552,6 +1576,14 @@
"",
"",
"",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
""
],
"type": "string"
@@ -2032,7 +2064,8 @@
"METRIC_WIN_LOSS_DEAL_AVAILABLE_REQUESTS",
"METRIC_WIN_LOSS_LINEITEM_AVAILABLE_REQUESTS",
"METRIC_WIN_LOSS_DEAL_TARGETED_IMPRESSIONS",
- "METRIC_WIN_LOSS_LINEITEM_TARGETED_IMPRESSIONS"
+ "METRIC_WIN_LOSS_LINEITEM_TARGETED_IMPRESSIONS",
+ "METRIC_VERIFICATION_VIDEO_PLAYER_SIZE_MEASURABLE_IMPRESSIONS"
],
"enumDescriptions": [
"",
@@ -2501,6 +2534,7 @@
"",
"",
"",
+ "",
""
],
"type": "string"
@@ -2924,7 +2958,15 @@
"FILTER_INSERTION_ORDER_GOAL_VALUE",
"FILTER_OMID_CAPABLE",
"FILTER_VENDOR_MEASUREMENT_MODE",
- "FILTER_IMPRESSION_LOSS_REJECTION_REASON"
+ "FILTER_IMPRESSION_LOSS_REJECTION_REASON",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_START",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_FIRST_QUARTILE",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_MID_POINT",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_THIRD_QUARTILE",
+ "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_COMPLETE",
+ "FILTER_VERIFICATION_VIDEO_RESIZED",
+ "FILTER_VERIFICATION_AUDIBILITY_START",
+ "FILTER_VERIFICATION_AUDIBILITY_COMPLETE"
],
"enumDescriptions": [
"",
@@ -3208,6 +3250,14 @@
"",
"",
"",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
""
],
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json
index fd1da51fc..70e11f9a9 100644
--- a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json
+++ b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json
@@ -96,7 +96,7 @@
},
"protocol": "rest",
"resources": {},
- "revision": "20210824",
+ "revision": "20210920",
"rootUrl": "https://doubleclickbidmanager.googleapis.com/",
"schemas": {},
"servicePath": "doubleclickbidmanager/v1/",
diff --git a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json
index 77f2b99a9..ed8de2c3f 100644
--- a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json
+++ b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json
@@ -399,7 +399,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210921",
"rootUrl": "https://doubleclicksearch.googleapis.com/",
"schemas": {
"Availability": {
diff --git a/googleapiclient/discovery_cache/documents/drive.v2.json b/googleapiclient/discovery_cache/documents/drive.v2.json
index 6eda1b5fc..4a4c8a8b9 100644
--- a/googleapiclient/discovery_cache/documents/drive.v2.json
+++ b/googleapiclient/discovery_cache/documents/drive.v2.json
@@ -38,7 +38,7 @@
"description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.",
"discoveryVersion": "v1",
"documentationLink": "https://developers.google.com/drive/",
- "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/Vu4poje-2dWgpys_T9FZKaW40Qk\"",
+ "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/1zEdrQsJh-Ob8KbDsrp0vIynqK0\"",
"icons": {
"x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png",
"x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png"
@@ -1659,7 +1659,7 @@
]
},
"patch": {
- "description": "Updates file metadata and/or content. This method supports patch semantics.",
+ "description": "Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might might change automatically, such as modifiedDate. This method supports patch semantics.",
"httpMethod": "PATCH",
"id": "drive.files.patch",
"parameterOrder": [
@@ -1931,7 +1931,7 @@
]
},
"update": {
- "description": "Updates file metadata and/or content.",
+ "description": "Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might be changed automatically, such as modifiedDate. This method supports patch semantics.",
"httpMethod": "PUT",
"id": "drive.files.update",
"mediaUpload": {
@@ -3527,7 +3527,7 @@
}
}
},
- "revision": "20210912",
+ "revision": "20210921",
"rootUrl": "https://www.googleapis.com/",
"schemas": {
"About": {
diff --git a/googleapiclient/discovery_cache/documents/drive.v3.json b/googleapiclient/discovery_cache/documents/drive.v3.json
index db01e31ca..52e62fa57 100644
--- a/googleapiclient/discovery_cache/documents/drive.v3.json
+++ b/googleapiclient/discovery_cache/documents/drive.v3.json
@@ -35,7 +35,7 @@
"description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.",
"discoveryVersion": "v1",
"documentationLink": "https://developers.google.com/drive/",
- "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/FI7fSrKiR16OF44lPfi5QIDdyps\"",
+ "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/W19uzic4WgAV-31T2Pfs85esB2s\"",
"icons": {
"x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png",
"x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png"
@@ -1211,7 +1211,7 @@
]
},
"update": {
- "description": "Updates a file's metadata and/or content. This method supports patch semantics.",
+ "description": "Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. This method supports patch semantics.",
"httpMethod": "PATCH",
"id": "drive.files.update",
"mediaUpload": {
@@ -2191,7 +2191,7 @@
}
}
},
- "revision": "20210912",
+ "revision": "20210921",
"rootUrl": "https://www.googleapis.com/",
"schemas": {
"About": {
diff --git a/googleapiclient/discovery_cache/documents/driveactivity.v2.json b/googleapiclient/discovery_cache/documents/driveactivity.v2.json
index 8499a9eed..e59794d3e 100644
--- a/googleapiclient/discovery_cache/documents/driveactivity.v2.json
+++ b/googleapiclient/discovery_cache/documents/driveactivity.v2.json
@@ -132,7 +132,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210922",
"rootUrl": "https://driveactivity.googleapis.com/",
"schemas": {
"Action": {
diff --git a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json
index 1294882af..5b43f8332 100644
--- a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json
+++ b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json
@@ -850,7 +850,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://essentialcontacts.googleapis.com/",
"schemas": {
"GoogleCloudEssentialcontactsV1ComputeContactsResponse": {
diff --git a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json
index dcbdd4488..1acd6d0df 100644
--- a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json
@@ -304,7 +304,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://factchecktools.googleapis.com/",
"schemas": {
"GoogleFactcheckingFactchecktoolsV1alpha1Claim": {
diff --git a/googleapiclient/discovery_cache/documents/fcm.v1.json b/googleapiclient/discovery_cache/documents/fcm.v1.json
index b743964e4..6cd78d955 100644
--- a/googleapiclient/discovery_cache/documents/fcm.v1.json
+++ b/googleapiclient/discovery_cache/documents/fcm.v1.json
@@ -146,7 +146,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210920",
"rootUrl": "https://fcm.googleapis.com/",
"schemas": {
"AndroidConfig": {
diff --git a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json
index 4d3c9497f..59feaf4c5 100644
--- a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json
@@ -154,7 +154,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://fcmdata.googleapis.com/",
"schemas": {
"GoogleFirebaseFcmDataV1beta1AndroidDeliveryData": {
diff --git a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json
index 2508c2e4f..6ff19da33 100644
--- a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json
@@ -1121,7 +1121,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210924",
"rootUrl": "https://firebase.googleapis.com/",
"schemas": {
"AddFirebaseRequest": {
diff --git a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json
index 8244fa899..bebc6bcc1 100644
--- a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json
@@ -1057,7 +1057,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://firebaseappcheck.googleapis.com/",
"schemas": {
"GoogleFirebaseAppcheckV1betaAppAttestChallengeResponse": {
diff --git a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json
index 0a1566409..fac1479a2 100644
--- a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json
@@ -317,7 +317,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210924",
"rootUrl": "https://firebasedatabase.googleapis.com/",
"schemas": {
"DatabaseInstance": {
diff --git a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json
index 893b1beca..727c518cb 100644
--- a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json
+++ b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json
@@ -224,7 +224,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210920",
"rootUrl": "https://firebasedynamiclinks.googleapis.com/",
"schemas": {
"AnalyticsInfo": {
diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json
index ddb7468eb..c473f78cf 100644
--- a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json
+++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json
@@ -186,7 +186,7 @@
}
}
},
- "revision": "20210818",
+ "revision": "20210921",
"rootUrl": "https://firebasehosting.googleapis.com/",
"schemas": {
"CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json
index cdafb2e04..3eae5f50a 100644
--- a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json
@@ -1939,7 +1939,7 @@
}
}
},
- "revision": "20210818",
+ "revision": "20210921",
"rootUrl": "https://firebasehosting.googleapis.com/",
"schemas": {
"ActingUser": {
diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1.json b/googleapiclient/discovery_cache/documents/firebaseml.v1.json
index 2fea2f5f5..fec8b588a 100644
--- a/googleapiclient/discovery_cache/documents/firebaseml.v1.json
+++ b/googleapiclient/discovery_cache/documents/firebaseml.v1.json
@@ -204,7 +204,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210922",
"rootUrl": "https://firebaseml.googleapis.com/",
"schemas": {
"CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json
index 1bb3b8faa..e59cff71b 100644
--- a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json
@@ -318,7 +318,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210922",
"rootUrl": "https://firebaseml.googleapis.com/",
"schemas": {
"DownloadModelResponse": {
diff --git a/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json b/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json
index 9d40b4b37..699f3e474 100644
--- a/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json
@@ -238,7 +238,7 @@
}
}
},
- "revision": "20210823",
+ "revision": "20210917",
"rootUrl": "https://firebasestorage.googleapis.com/",
"schemas": {
"AddFirebaseRequest": {
@@ -301,7 +301,8 @@
"DELETING_TEMP_BUCKET",
"SUCCEEDED",
"FAILED",
- "ROLLING_BACK"
+ "ROLLING_BACK",
+ "ROLLED_BACK"
],
"enumDescriptions": [
"Unspecified state. Should not be used.",
@@ -313,8 +314,9 @@
"The second STS transfer to move all objects from the temp bucket to the final bucket is underway.",
"The temp bucket is being emptied and deleted.",
"All stages of the migration have completed and the operation has been marked done and updated with a response.",
- "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed.",
- "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state."
+ "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed. Rollback is either impossible at this stage, or has been attempted and failed.",
+ "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state.",
+ "The migration has been successfully rolled back."
],
"type": "string"
}
@@ -348,7 +350,8 @@
"DELETING_TEMP_BUCKET",
"SUCCEEDED",
"FAILED",
- "ROLLING_BACK"
+ "ROLLING_BACK",
+ "ROLLED_BACK"
],
"enumDescriptions": [
"Unspecified state. Should not be used.",
@@ -360,8 +363,9 @@
"The second STS transfer to move all objects from the temp bucket to the final bucket is underway.",
"The temp bucket is being emptied and deleted.",
"All stages of the migration have completed and the operation has been marked done and updated with a response.",
- "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed.",
- "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state."
+ "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed. Rollback is either impossible at this stage, or has been attempted and failed.",
+ "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state.",
+ "The migration has been successfully rolled back."
],
"type": "string"
}
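With ROLLED_BACK added, the migration state machine now has three terminal states. A small sketch of classifying the state carried in the operation metadata; the metadata dict shape is assumed from the schema above.

```python
TERMINAL_STATES = {"SUCCEEDED", "FAILED", "ROLLED_BACK"}

def migration_finished(metadata: dict) -> bool:
    """True once the bucket migration can no longer make progress."""
    state = metadata.get("state", "STATE_UNSPECIFIED")
    if state == "FAILED":
        # Per the updated description: rollback was impossible here,
        # or was attempted and failed; manual recovery may be needed.
        pass
    return state in TERMINAL_STATES
```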
diff --git a/googleapiclient/discovery_cache/documents/firestore.v1.json b/googleapiclient/discovery_cache/documents/firestore.v1.json
index ad548e696..ea7004a89 100644
--- a/googleapiclient/discovery_cache/documents/firestore.v1.json
+++ b/googleapiclient/discovery_cache/documents/firestore.v1.json
@@ -141,6 +141,32 @@
"https://www.googleapis.com/auth/datastore"
]
},
+ "get": {
+ "description": "Gets information about a database.",
+ "flatPath": "v1/projects/{projectsId}/databases/{databasesId}",
+ "httpMethod": "GET",
+ "id": "firestore.projects.databases.get",
+ "parameterOrder": [
+ "name"
+ ],
+ "parameters": {
+ "name": {
+ "description": "Required. A name of the form `projects/{project_id}/databases/{database_id}`",
+ "location": "path",
+ "pattern": "^projects/[^/]+/databases/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v1/{+name}",
+ "response": {
+ "$ref": "GoogleFirestoreAdminV1Database"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/datastore"
+ ]
+ },
"importDocuments": {
"description": "Imports documents into Google Cloud Firestore. Existing documents with the same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore.",
"flatPath": "v1/projects/{projectsId}/databases/{databasesId}:importDocuments",
@@ -169,6 +195,67 @@
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/datastore"
]
+ },
+ "list": {
+ "description": "List all the databases in the project.",
+ "flatPath": "v1/projects/{projectsId}/databases",
+ "httpMethod": "GET",
+ "id": "firestore.projects.databases.list",
+ "parameterOrder": [
+ "parent"
+ ],
+ "parameters": {
+ "parent": {
+ "description": "Required. A parent name of the form `projects/{project_id}`",
+ "location": "path",
+ "pattern": "^projects/[^/]+$",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "v1/{+parent}/databases",
+ "response": {
+ "$ref": "GoogleFirestoreAdminV1ListDatabasesResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/datastore"
+ ]
+ },
+ "patch": {
+ "description": "Updates a database.",
+ "flatPath": "v1/projects/{projectsId}/databases/{databasesId}",
+ "httpMethod": "PATCH",
+ "id": "firestore.projects.databases.patch",
+ "parameterOrder": [
+ "name"
+ ],
+ "parameters": {
+ "name": {
+ "description": "The resource name of the Database. Format: `projects/{project}/databases/{database}`",
+ "location": "path",
+ "pattern": "^projects/[^/]+/databases/[^/]+$",
+ "required": true,
+ "type": "string"
+ },
+ "updateMask": {
+ "description": "The list of fields to be updated.",
+ "format": "google-fieldmask",
+ "location": "query",
+ "type": "string"
+ }
+ },
+ "path": "v1/{+name}",
+ "request": {
+ "$ref": "GoogleFirestoreAdminV1Database"
+ },
+ "response": {
+ "$ref": "GoogleLongrunningOperation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/datastore"
+ ]
}
},
"resources": {
@@ -1160,7 +1247,7 @@
}
}
},
- "revision": "20210901",
+ "revision": "20210916",
"rootUrl": "https://firestore.googleapis.com/",
"schemas": {
"ArrayValue": {
@@ -1685,6 +1772,55 @@
},
"type": "object"
},
+ "GoogleFirestoreAdminV1Database": {
+ "description": "A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named '(default)'",
+ "id": "GoogleFirestoreAdminV1Database",
+ "properties": {
+ "concurrencyMode": {
+ "description": "The concurrency control mode to use for this database.",
+ "enum": [
+ "CONCURRENCY_MODE_UNSPECIFIED",
+ "OPTIMISTIC",
+ "PESSIMISTIC",
+ "OPTIMISTIC_WITH_ENTITY_GROUPS"
+ ],
+ "enumDescriptions": [
+ "Not used.",
+ "Use optimistic concurrency control by default. This is the setting for Cloud Firestore customers.",
+ "Use pessimistic concurrency control by default. This is the setting for Cloud Firestore customers.",
+ "Use optimistic concurrency control with entity groups by default. This is the setting for Cloud Datastore customers."
+ ],
+ "type": "string"
+ },
+ "etag": {
+ "description": "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.",
+ "type": "string"
+ },
+ "locationId": {
+ "description": "The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The resource name of the Database. Format: `projects/{project}/databases/{database}`",
+ "type": "string"
+ },
+ "type": {
+ "description": "The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.",
+ "enum": [
+ "DATABASE_TYPE_UNSPECIFIED",
+ "FIRESTORE_NATIVE",
+ "DATASTORE_MODE"
+ ],
+ "enumDescriptions": [
+ "The default value. This value is used if the database type is omitted.",
+ "Firestore Native Mode",
+ "Firestore in Datastore Mode."
+ ],
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleFirestoreAdminV1ExportDocumentsMetadata": {
"description": "Metadata for google.longrunning.Operation results from FirestoreAdmin.ExportDocuments.",
"id": "GoogleFirestoreAdminV1ExportDocumentsMetadata",
@@ -2116,6 +2252,20 @@
},
"type": "object"
},
+ "GoogleFirestoreAdminV1ListDatabasesResponse": {
+ "description": "The list of databases for a project.",
+ "id": "GoogleFirestoreAdminV1ListDatabasesResponse",
+ "properties": {
+ "databases": {
+ "description": "The databases in the project.",
+ "items": {
+ "$ref": "GoogleFirestoreAdminV1Database"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
"GoogleFirestoreAdminV1ListFieldsResponse": {
"description": "The response for FirestoreAdmin.ListFields.",
"id": "GoogleFirestoreAdminV1ListFieldsResponse",
diff --git a/googleapiclient/discovery_cache/documents/firestore.v1beta1.json b/googleapiclient/discovery_cache/documents/firestore.v1beta1.json
index 2fdc27b09..265480dbf 100644
--- a/googleapiclient/discovery_cache/documents/firestore.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/firestore.v1beta1.json
@@ -849,7 +849,7 @@
}
}
},
- "revision": "20210901",
+ "revision": "20210916",
"rootUrl": "https://firestore.googleapis.com/",
"schemas": {
"ArrayValue": {
diff --git a/googleapiclient/discovery_cache/documents/firestore.v1beta2.json b/googleapiclient/discovery_cache/documents/firestore.v1beta2.json
index ecdaa94cf..4cb3bbd20 100644
--- a/googleapiclient/discovery_cache/documents/firestore.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/firestore.v1beta2.json
@@ -415,7 +415,7 @@
}
}
},
- "revision": "20210901",
+ "revision": "20210916",
"rootUrl": "https://firestore.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/fitness.v1.json b/googleapiclient/discovery_cache/documents/fitness.v1.json
index 8b7cf46d0..e69540c18 100644
--- a/googleapiclient/discovery_cache/documents/fitness.v1.json
+++ b/googleapiclient/discovery_cache/documents/fitness.v1.json
@@ -831,7 +831,7 @@
}
}
},
- "revision": "20210920",
+ "revision": "20210922",
"rootUrl": "https://fitness.googleapis.com/",
"schemas": {
"AggregateBucket": {
@@ -937,7 +937,7 @@
"fitness.users.dataset.aggregate"
]
},
- "description": "The end of a window of time. Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive.",
+ "description": "The end of a window of time. Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive. The maximum allowed difference between start_time_millis // and end_time_millis is 7776000000 (roughly 90 days).",
"format": "int64",
"type": "string"
},
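The aggregate window is now capped at 7776000000 ms (~90 days) between start and end. A sketch of a compliant request; OAuth user credentials are assumed and the data type is illustrative.

```python
import time
from googleapiclient.discovery import build

fitness = build("fitness", "v1")  # OAuth user credentials assumed
now_ms = int(time.time() * 1000)
resp = fitness.users().dataset().aggregate(userId="me", body={
    "aggregateBy": [{"dataTypeName": "com.google.step_count.delta"}],
    "bucketByTime": {"durationMillis": 86400000},  # daily buckets
    # end - start must not exceed 7776000000 ms (~90 days).
    "startTimeMillis": now_ms - 30 * 86400000,
    "endTimeMillis": now_ms,
}).execute()
```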
diff --git a/googleapiclient/discovery_cache/documents/games.v1.json b/googleapiclient/discovery_cache/documents/games.v1.json
index cdc18f1d8..4ec4c0451 100644
--- a/googleapiclient/discovery_cache/documents/games.v1.json
+++ b/googleapiclient/discovery_cache/documents/games.v1.json
@@ -1224,7 +1224,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210922",
"rootUrl": "https://games.googleapis.com/",
"schemas": {
"AchievementDefinition": {
diff --git a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json
index 4e0fee579..b3c428a08 100644
--- a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json
+++ b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json
@@ -439,7 +439,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210922",
"rootUrl": "https://gamesconfiguration.googleapis.com/",
"schemas": {
"AchievementConfiguration": {
diff --git a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json
index d9dd0efd0..4b4839f35 100644
--- a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json
+++ b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json
@@ -471,7 +471,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210922",
"rootUrl": "https://gamesmanagement.googleapis.com/",
"schemas": {
"AchievementResetAllResponse": {
diff --git a/googleapiclient/discovery_cache/documents/gameservices.v1.json b/googleapiclient/discovery_cache/documents/gameservices.v1.json
index 811a5d33e..14cae4b92 100644
--- a/googleapiclient/discovery_cache/documents/gameservices.v1.json
+++ b/googleapiclient/discovery_cache/documents/gameservices.v1.json
@@ -1357,7 +1357,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210915",
"rootUrl": "https://gameservices.googleapis.com/",
"schemas": {
"AuditConfig": {
@@ -2631,7 +2631,7 @@
"type": "array"
},
"logConfig": {
- "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.",
+ "description": "The config returned to callers of CheckPolicy for any entries that match the LOG action.",
"items": {
"$ref": "LogConfig"
},
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1.json b/googleapiclient/discovery_cache/documents/gkehub.v1.json
index 6afe8e081..e5325101e 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1.json
@@ -905,7 +905,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://gkehub.googleapis.com/",
"schemas": {
"AuditConfig": {
@@ -1041,6 +1041,13 @@
"$ref": "ConfigManagementGitConfig",
"description": "Git repo configuration for the cluster."
},
+ "resourceRequirements": {
+ "additionalProperties": {
+ "$ref": "ConfigManagementContainerResourceRequirements"
+ },
+ "description": "Specifies CPU and memory limits for containers, keyed by container name",
+ "type": "object"
+ },
"sourceFormat": {
"description": "Specifies whether the Config Sync Repo is in \u201chierarchical\u201d or \u201cunstructured\u201d mode.",
"type": "string"
@@ -1052,6 +1059,22 @@
"description": "The state of ConfigSync's deployment on a cluster",
"id": "ConfigManagementConfigSyncDeploymentState",
"properties": {
+ "admissionWebhook": {
+ "description": "Deployment state of admission-webhook",
+ "enum": [
+ "DEPLOYMENT_STATE_UNSPECIFIED",
+ "NOT_INSTALLED",
+ "INSTALLED",
+ "ERROR"
+ ],
+ "enumDescriptions": [
+ "Deployment's state cannot be determined",
+ "Deployment is not installed",
+ "Deployment is installed",
+ "Deployment was attempted to be installed, but has errors"
+ ],
+ "type": "string"
+ },
"gitSync": {
"description": "Deployment state of the git-sync pod",
"enum": [
@@ -1174,6 +1197,10 @@
"description": "Specific versioning information pertaining to ConfigSync's Pods",
"id": "ConfigManagementConfigSyncVersion",
"properties": {
+ "admissionWebhook": {
+ "description": "Version of the deployed admission_webhook pod",
+ "type": "string"
+ },
"gitSync": {
"description": "Version of the deployed git-sync pod",
"type": "string"
@@ -1201,6 +1228,25 @@
},
"type": "object"
},
+ "ConfigManagementContainerResourceRequirements": {
+ "description": "ResourceRequirements allows to override the CPU and memory resource requirements of a container.",
+ "id": "ConfigManagementContainerResourceRequirements",
+ "properties": {
+ "containerName": {
+ "description": "Name of the container",
+ "type": "string"
+ },
+ "cpuLimit": {
+ "$ref": "ConfigManagementQuantity",
+ "description": "Allows to override the CPU limit of a container"
+ },
+ "memoryLimit": {
+ "$ref": "ConfigManagementQuantity",
+ "description": "Allows to override the memory limit of a container"
+ }
+ },
+ "type": "object"
+ },
"ConfigManagementErrorResource": {
"description": "Model for a config file in the git repo with an associated Sync error",
"id": "ConfigManagementErrorResource",
@@ -1275,6 +1321,10 @@
"description": "URL for the HTTPS proxy to be used when communicating with the Git repo.",
"type": "string"
},
+ "noSslVerify": {
+ "description": "Enable or disable the SSL certificate verification Default: false.",
+ "type": "boolean"
+ },
"policyDir": {
"description": "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.",
"type": "string"
@@ -1287,6 +1337,11 @@
"description": "The branch of the repository to sync from. Default: master.",
"type": "string"
},
+ "syncDepth": {
+ "description": "The depth of git commits synced by the git-sync container.",
+ "format": "int64",
+ "type": "string"
+ },
"syncRepo": {
"description": "The URL of the Git repository to use as the source of truth.",
"type": "string"
@@ -1570,6 +1625,17 @@
},
"type": "object"
},
+ "ConfigManagementQuantity": {
+ "description": "The view model of a single quantity, e.g. \"800 MiB\". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto",
+ "id": "ConfigManagementQuantity",
+ "properties": {
+ "string": {
+ "description": "Stringified version of the quantity, e.g., \"800 MiB\".",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"ConfigManagementSyncError": {
"description": "An ACM created error representing a problem syncing configurations",
"id": "ConfigManagementSyncError",
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json
index e34ba3289..d4ad8eec0 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json
@@ -670,7 +670,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://gkehub.googleapis.com/",
"schemas": {
"AuditConfig": {
@@ -851,6 +851,13 @@
"$ref": "ConfigManagementGitConfig",
"description": "Git repo configuration for the cluster."
},
+ "resourceRequirements": {
+ "additionalProperties": {
+ "$ref": "ConfigManagementContainerResourceRequirements"
+ },
+ "description": "Specifies CPU and memory limits for containers, keyed by container name",
+ "type": "object"
+ },
"sourceFormat": {
"description": "Specifies whether the Config Sync Repo is in \u201chierarchical\u201d or \u201cunstructured\u201d mode.",
"type": "string"
@@ -862,6 +869,22 @@
"description": "The state of ConfigSync's deployment on a cluster",
"id": "ConfigManagementConfigSyncDeploymentState",
"properties": {
+ "admissionWebhook": {
+ "description": "Deployment state of admission-webhook",
+ "enum": [
+ "DEPLOYMENT_STATE_UNSPECIFIED",
+ "NOT_INSTALLED",
+ "INSTALLED",
+ "ERROR"
+ ],
+ "enumDescriptions": [
+ "Deployment's state cannot be determined",
+ "Deployment is not installed",
+ "Deployment is installed",
+ "Deployment was attempted to be installed, but has errors"
+ ],
+ "type": "string"
+ },
"gitSync": {
"description": "Deployment state of the git-sync pod",
"enum": [
@@ -984,6 +1007,10 @@
"description": "Specific versioning information pertaining to ConfigSync's Pods",
"id": "ConfigManagementConfigSyncVersion",
"properties": {
+ "admissionWebhook": {
+ "description": "Version of the deployed admission_webhook pod",
+ "type": "string"
+ },
"gitSync": {
"description": "Version of the deployed git-sync pod",
"type": "string"
@@ -1011,6 +1038,25 @@
},
"type": "object"
},
+ "ConfigManagementContainerResourceRequirements": {
+ "description": "ResourceRequirements allows to override the CPU and memory resource requirements of a container.",
+ "id": "ConfigManagementContainerResourceRequirements",
+ "properties": {
+ "containerName": {
+ "description": "Name of the container",
+ "type": "string"
+ },
+ "cpuLimit": {
+ "$ref": "ConfigManagementQuantity",
+ "description": "Allows to override the CPU limit of a container"
+ },
+ "memoryLimit": {
+ "$ref": "ConfigManagementQuantity",
+ "description": "Allows to override the memory limit of a container"
+ }
+ },
+ "type": "object"
+ },
"ConfigManagementErrorResource": {
"description": "Model for a config file in the git repo with an associated Sync error",
"id": "ConfigManagementErrorResource",
@@ -1101,6 +1147,10 @@
"description": "URL for the HTTPS proxy to be used when communicating with the Git repo.",
"type": "string"
},
+ "noSslVerify": {
+ "description": "Enable or disable the SSL certificate verification Default: false.",
+ "type": "boolean"
+ },
"policyDir": {
"description": "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.",
"type": "string"
@@ -1113,6 +1163,11 @@
"description": "The branch of the repository to sync from. Default: master.",
"type": "string"
},
+ "syncDepth": {
+ "description": "The depth of git commits synced by the git-sync container.",
+ "format": "int64",
+ "type": "string"
+ },
"syncRepo": {
"description": "The URL of the Git repository to use as the source of truth.",
"type": "string"
@@ -1408,6 +1463,17 @@
},
"type": "object"
},
+ "ConfigManagementQuantity": {
+ "description": "The view model of a single quantity, e.g. \"800 MiB\". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto",
+ "id": "ConfigManagementQuantity",
+ "properties": {
+ "string": {
+ "description": "Stringified version of the quantity, e.g., \"800 MiB\".",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"ConfigManagementSyncError": {
"description": "An ACM created error representing a problem syncing configurations",
"id": "ConfigManagementSyncError",
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json
index 2af97cb5c..162f0188f 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json
@@ -652,7 +652,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://gkehub.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json
index 3bd257818..d55114799 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json
@@ -670,7 +670,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://gkehub.googleapis.com/",
"schemas": {
"AuditConfig": {
@@ -829,6 +829,13 @@
"$ref": "ConfigManagementGitConfig",
"description": "Git repo configuration for the cluster."
},
+ "resourceRequirements": {
+ "additionalProperties": {
+ "$ref": "ConfigManagementContainerResourceRequirements"
+ },
+ "description": "Specifies CPU and memory limits for containers, keyed by container name",
+ "type": "object"
+ },
"sourceFormat": {
"description": "Specifies whether the Config Sync Repo is in \u201chierarchical\u201d or \u201cunstructured\u201d mode.",
"type": "string"
@@ -840,6 +847,22 @@
"description": "The state of ConfigSync's deployment on a cluster",
"id": "ConfigManagementConfigSyncDeploymentState",
"properties": {
+ "admissionWebhook": {
+ "description": "Deployment state of admission-webhook",
+ "enum": [
+ "DEPLOYMENT_STATE_UNSPECIFIED",
+ "NOT_INSTALLED",
+ "INSTALLED",
+ "ERROR"
+ ],
+ "enumDescriptions": [
+ "Deployment's state cannot be determined",
+ "Deployment is not installed",
+ "Deployment is installed",
+ "Deployment was attempted to be installed, but has errors"
+ ],
+ "type": "string"
+ },
"gitSync": {
"description": "Deployment state of the git-sync pod",
"enum": [
@@ -962,6 +985,10 @@
"description": "Specific versioning information pertaining to ConfigSync's Pods",
"id": "ConfigManagementConfigSyncVersion",
"properties": {
+ "admissionWebhook": {
+ "description": "Version of the deployed admission_webhook pod",
+ "type": "string"
+ },
"gitSync": {
"description": "Version of the deployed git-sync pod",
"type": "string"
@@ -989,6 +1016,25 @@
},
"type": "object"
},
+ "ConfigManagementContainerResourceRequirements": {
+ "description": "ResourceRequirements allows to override the CPU and memory resource requirements of a container.",
+ "id": "ConfigManagementContainerResourceRequirements",
+ "properties": {
+ "containerName": {
+ "description": "Name of the container",
+ "type": "string"
+ },
+ "cpuLimit": {
+ "$ref": "ConfigManagementQuantity",
+ "description": "Allows to override the CPU limit of a container"
+ },
+ "memoryLimit": {
+ "$ref": "ConfigManagementQuantity",
+ "description": "Allows to override the memory limit of a container"
+ }
+ },
+ "type": "object"
+ },
"ConfigManagementErrorResource": {
"description": "Model for a config file in the git repo with an associated Sync error",
"id": "ConfigManagementErrorResource",
@@ -1063,6 +1109,10 @@
"description": "URL for the HTTPS proxy to be used when communicating with the Git repo.",
"type": "string"
},
+ "noSslVerify": {
+ "description": "Enable or disable the SSL certificate verification Default: false.",
+ "type": "boolean"
+ },
"policyDir": {
"description": "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.",
"type": "string"
@@ -1075,6 +1125,11 @@
"description": "The branch of the repository to sync from. Default: master.",
"type": "string"
},
+ "syncDepth": {
+ "description": "The depth of git commits synced by the git-sync container.",
+ "format": "int64",
+ "type": "string"
+ },
"syncRepo": {
"description": "The URL of the Git repository to use as the source of truth.",
"type": "string"
@@ -1366,6 +1421,17 @@
},
"type": "object"
},
+ "ConfigManagementQuantity": {
+ "description": "The view model of a single quantity, e.g. \"800 MiB\". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto",
+ "id": "ConfigManagementQuantity",
+ "properties": {
+ "string": {
+ "description": "Stringified version of the quantity, e.g., \"800 MiB\".",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"ConfigManagementSyncError": {
"description": "An ACM created error representing a problem syncing configurations",
"id": "ConfigManagementSyncError",
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json
index 61a49957e..1f6fc6064 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json
@@ -706,7 +706,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://gkehub.googleapis.com/",
"schemas": {
"AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/gmail.v1.json b/googleapiclient/discovery_cache/documents/gmail.v1.json
index aab0f3192..26fe0c0c5 100644
--- a/googleapiclient/discovery_cache/documents/gmail.v1.json
+++ b/googleapiclient/discovery_cache/documents/gmail.v1.json
@@ -2682,7 +2682,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210920",
"rootUrl": "https://gmail.googleapis.com/",
"schemas": {
"AutoForwarding": {
diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json
index 89d69a47f..7f318139c 100644
--- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json
+++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json
@@ -265,7 +265,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://gmailpostmastertools.googleapis.com/",
"schemas": {
"DeliveryError": {
diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json
index 61c9165c1..ff95b1532 100644
--- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json
@@ -265,7 +265,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://gmailpostmastertools.googleapis.com/",
"schemas": {
"DeliveryError": {
diff --git a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json
index 6f103c1f7..20fccf86c 100644
--- a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json
+++ b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json
@@ -146,7 +146,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210919",
"rootUrl": "https://groupsmigration.googleapis.com/",
"schemas": {
"Groups": {
diff --git a/googleapiclient/discovery_cache/documents/groupssettings.v1.json b/googleapiclient/discovery_cache/documents/groupssettings.v1.json
index e62c4a645..7bfc551b5 100644
--- a/googleapiclient/discovery_cache/documents/groupssettings.v1.json
+++ b/googleapiclient/discovery_cache/documents/groupssettings.v1.json
@@ -152,7 +152,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210921",
"rootUrl": "https://www.googleapis.com/",
"schemas": {
"Groups": {
diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1.json b/googleapiclient/discovery_cache/documents/healthcare.v1.json
index d45d11c7a..05eeefa48 100644
--- a/googleapiclient/discovery_cache/documents/healthcare.v1.json
+++ b/googleapiclient/discovery_cache/documents/healthcare.v1.json
@@ -3956,7 +3956,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210914",
"rootUrl": "https://healthcare.googleapis.com/",
"schemas": {
"ActivateConsentRequest": {
@@ -4071,7 +4071,7 @@
"id": "AttributeDefinition",
"properties": {
"allowedValues": {
- "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.",
+ "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.",
"items": {
"type": "string"
},
diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json
index cf53cbcec..64553b0c9 100644
--- a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json
@@ -4865,7 +4865,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210914",
"rootUrl": "https://healthcare.googleapis.com/",
"schemas": {
"ActivateConsentRequest": {
@@ -5058,7 +5058,7 @@
"id": "AttributeDefinition",
"properties": {
"allowedValues": {
- "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.",
+ "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.",
"items": {
"type": "string"
},
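The allowedValues cap rises from 100 to 500 entries. A sketch of creating an attribute definition under the new limit; resource names and values are placeholders and credentials are assumed to be configured.

```python
from googleapiclient.discovery import build

healthcare = build("healthcare", "v1")  # credentials assumed to be configured
store = ("projects/my-project/locations/us-central1/"
         "datasets/my-dataset/consentStores/my-store")  # placeholder

healthcare.projects().locations().datasets().consentStores() \
    .attributeDefinitions().create(
        parent=store,
        attributeDefinitionId="data_use",
        body={
            "category": "RESOURCE",
            # Up to 500 values now; the list can only be expanded later.
            "allowedValues": ["research", "billing", "quality_improvement"],
        },
    ).execute()
```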
diff --git a/googleapiclient/discovery_cache/documents/homegraph.v1.json b/googleapiclient/discovery_cache/documents/homegraph.v1.json
index b40886f2d..e6584f6b9 100644
--- a/googleapiclient/discovery_cache/documents/homegraph.v1.json
+++ b/googleapiclient/discovery_cache/documents/homegraph.v1.json
@@ -216,7 +216,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://homegraph.googleapis.com/",
"schemas": {
"AgentDeviceId": {
diff --git a/googleapiclient/discovery_cache/documents/iam.v1.json b/googleapiclient/discovery_cache/documents/iam.v1.json
index dd69da408..edaea64b7 100644
--- a/googleapiclient/discovery_cache/documents/iam.v1.json
+++ b/googleapiclient/discovery_cache/documents/iam.v1.json
@@ -1484,7 +1484,7 @@
]
},
"disable": {
- "description": "Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey. The API is currently in preview phase.",
+ "description": "Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey.",
"flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}:disable",
"httpMethod": "POST",
"id": "iam.projects.serviceAccounts.keys.disable",
@@ -1512,7 +1512,7 @@
]
},
"enable": {
- "description": "Enable a ServiceAccountKey. The API is currently in preview phase.",
+ "description": "Enable a ServiceAccountKey.",
"flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}:enable",
"httpMethod": "POST",
"id": "iam.projects.serviceAccounts.keys.enable",
@@ -1752,7 +1752,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210918",
"rootUrl": "https://iam.googleapis.com/",
"schemas": {
"AdminAuditData": {
diff --git a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json
index 14d14943c..45fe82c14 100644
--- a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json
+++ b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json
@@ -226,7 +226,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210916",
"rootUrl": "https://iamcredentials.googleapis.com/",
"schemas": {
"GenerateAccessTokenRequest": {
diff --git a/googleapiclient/discovery_cache/documents/iap.v1.json b/googleapiclient/discovery_cache/documents/iap.v1.json
index 93cd65d65..80fd85457 100644
--- a/googleapiclient/discovery_cache/documents/iap.v1.json
+++ b/googleapiclient/discovery_cache/documents/iap.v1.json
@@ -487,7 +487,7 @@
}
}
},
- "revision": "20210820",
+ "revision": "20210924",
"rootUrl": "https://iap.googleapis.com/",
"schemas": {
"AccessDeniedPageSettings": {
diff --git a/googleapiclient/discovery_cache/documents/iap.v1beta1.json b/googleapiclient/discovery_cache/documents/iap.v1beta1.json
index dcac02e93..0685cb79b 100644
--- a/googleapiclient/discovery_cache/documents/iap.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/iap.v1beta1.json
@@ -194,7 +194,7 @@
}
}
},
- "revision": "20210820",
+ "revision": "20210924",
"rootUrl": "https://iap.googleapis.com/",
"schemas": {
"Binding": {
diff --git a/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json b/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json
index 3b0014220..75abd26d4 100644
--- a/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json
@@ -331,7 +331,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://ideahub.googleapis.com/",
"schemas": {
"GoogleSearchIdeahubV1alphaAvailableLocale": {
diff --git a/googleapiclient/discovery_cache/documents/ideahub.v1beta.json b/googleapiclient/discovery_cache/documents/ideahub.v1beta.json
index c815acc9c..63c0c3ffa 100644
--- a/googleapiclient/discovery_cache/documents/ideahub.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/ideahub.v1beta.json
@@ -288,7 +288,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://ideahub.googleapis.com/",
"schemas": {
"GoogleSearchIdeahubV1betaAvailableLocale": {
diff --git a/googleapiclient/discovery_cache/documents/indexing.v3.json b/googleapiclient/discovery_cache/documents/indexing.v3.json
index f553338e7..f43fb0bf6 100644
--- a/googleapiclient/discovery_cache/documents/indexing.v3.json
+++ b/googleapiclient/discovery_cache/documents/indexing.v3.json
@@ -149,7 +149,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210914",
"rootUrl": "https://indexing.googleapis.com/",
"schemas": {
"PublishUrlNotificationResponse": {
diff --git a/googleapiclient/discovery_cache/documents/jobs.v3.json b/googleapiclient/discovery_cache/documents/jobs.v3.json
index 04baf10cf..d3701cb7b 100644
--- a/googleapiclient/discovery_cache/documents/jobs.v3.json
+++ b/googleapiclient/discovery_cache/documents/jobs.v3.json
@@ -651,7 +651,7 @@
}
}
},
- "revision": "20210830",
+ "revision": "20210914",
"rootUrl": "https://jobs.googleapis.com/",
"schemas": {
"ApplicationInfo": {
diff --git a/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json b/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json
index 3e2395f02..447b80a5f 100644
--- a/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json
+++ b/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json
@@ -681,7 +681,7 @@
}
}
},
- "revision": "20210830",
+ "revision": "20210914",
"rootUrl": "https://jobs.googleapis.com/",
"schemas": {
"ApplicationInfo": {
diff --git a/googleapiclient/discovery_cache/documents/jobs.v4.json b/googleapiclient/discovery_cache/documents/jobs.v4.json
index 591d1d981..34978e0ff 100644
--- a/googleapiclient/discovery_cache/documents/jobs.v4.json
+++ b/googleapiclient/discovery_cache/documents/jobs.v4.json
@@ -903,7 +903,7 @@
}
}
},
- "revision": "20210830",
+ "revision": "20210914",
"rootUrl": "https://jobs.googleapis.com/",
"schemas": {
"ApplicationInfo": {
diff --git a/googleapiclient/discovery_cache/documents/keep.v1.json b/googleapiclient/discovery_cache/documents/keep.v1.json
index 1fef707ae..0a0ab8ddf 100644
--- a/googleapiclient/discovery_cache/documents/keep.v1.json
+++ b/googleapiclient/discovery_cache/documents/keep.v1.json
@@ -314,7 +314,7 @@
}
}
},
- "revision": "20210920",
+ "revision": "20210921",
"rootUrl": "https://keep.googleapis.com/",
"schemas": {
"Attachment": {
diff --git a/googleapiclient/discovery_cache/documents/kgsearch.v1.json b/googleapiclient/discovery_cache/documents/kgsearch.v1.json
index b8bba2f26..a648b7f7c 100644
--- a/googleapiclient/discovery_cache/documents/kgsearch.v1.json
+++ b/googleapiclient/discovery_cache/documents/kgsearch.v1.json
@@ -151,7 +151,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210918",
"rootUrl": "https://kgsearch.googleapis.com/",
"schemas": {
"SearchResponse": {
diff --git a/googleapiclient/discovery_cache/documents/language.v1.json b/googleapiclient/discovery_cache/documents/language.v1.json
index 625ab083c..e1d87afc3 100644
--- a/googleapiclient/discovery_cache/documents/language.v1.json
+++ b/googleapiclient/discovery_cache/documents/language.v1.json
@@ -227,7 +227,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://language.googleapis.com/",
"schemas": {
"AnalyzeEntitiesRequest": {
diff --git a/googleapiclient/discovery_cache/documents/language.v1beta1.json b/googleapiclient/discovery_cache/documents/language.v1beta1.json
index c9d0df3b0..20abd9e15 100644
--- a/googleapiclient/discovery_cache/documents/language.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/language.v1beta1.json
@@ -189,7 +189,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://language.googleapis.com/",
"schemas": {
"AnalyzeEntitiesRequest": {
diff --git a/googleapiclient/discovery_cache/documents/language.v1beta2.json b/googleapiclient/discovery_cache/documents/language.v1beta2.json
index 7695363e0..9ca3c9ac4 100644
--- a/googleapiclient/discovery_cache/documents/language.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/language.v1beta2.json
@@ -227,7 +227,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://language.googleapis.com/",
"schemas": {
"AnalyzeEntitiesRequest": {
diff --git a/googleapiclient/discovery_cache/documents/libraryagent.v1.json b/googleapiclient/discovery_cache/documents/libraryagent.v1.json
index ada81fe50..3ada5e869 100644
--- a/googleapiclient/discovery_cache/documents/libraryagent.v1.json
+++ b/googleapiclient/discovery_cache/documents/libraryagent.v1.json
@@ -279,7 +279,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://libraryagent.googleapis.com/",
"schemas": {
"GoogleExampleLibraryagentV1Book": {
diff --git a/googleapiclient/discovery_cache/documents/licensing.v1.json b/googleapiclient/discovery_cache/documents/licensing.v1.json
index 0d891f6b3..7511256b7 100644
--- a/googleapiclient/discovery_cache/documents/licensing.v1.json
+++ b/googleapiclient/discovery_cache/documents/licensing.v1.json
@@ -400,7 +400,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://licensing.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/localservices.v1.json b/googleapiclient/discovery_cache/documents/localservices.v1.json
index 94daf08f1..4852cd289 100644
--- a/googleapiclient/discovery_cache/documents/localservices.v1.json
+++ b/googleapiclient/discovery_cache/documents/localservices.v1.json
@@ -250,7 +250,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://localservices.googleapis.com/",
"schemas": {
"GoogleAdsHomeservicesLocalservicesV1AccountReport": {
diff --git a/googleapiclient/discovery_cache/documents/manufacturers.v1.json b/googleapiclient/discovery_cache/documents/manufacturers.v1.json
index f044269dc..4ef671ca2 100644
--- a/googleapiclient/discovery_cache/documents/manufacturers.v1.json
+++ b/googleapiclient/discovery_cache/documents/manufacturers.v1.json
@@ -287,7 +287,7 @@
}
}
},
- "revision": "20210911",
+ "revision": "20210923",
"rootUrl": "https://manufacturers.googleapis.com/",
"schemas": {
"Attributes": {
diff --git a/googleapiclient/discovery_cache/documents/memcache.v1.json b/googleapiclient/discovery_cache/documents/memcache.v1.json
index ec0c84b6e..9e7bfe7fd 100644
--- a/googleapiclient/discovery_cache/documents/memcache.v1.json
+++ b/googleapiclient/discovery_cache/documents/memcache.v1.json
@@ -528,7 +528,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210917",
"rootUrl": "https://memcache.googleapis.com/",
"schemas": {
"ApplyParametersRequest": {
diff --git a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json
index 6b020d8c0..1f66e93bd 100644
--- a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json
@@ -556,7 +556,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210917",
"rootUrl": "https://memcache.googleapis.com/",
"schemas": {
"ApplyParametersRequest": {
diff --git a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json
index 3b376d88c..7983785a5 100644
--- a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json
@@ -1170,7 +1170,7 @@
}
}
},
- "revision": "20210908",
+ "revision": "20210921",
"rootUrl": "https://metastore.googleapis.com/",
"schemas": {
"AuditConfig": {
@@ -2117,7 +2117,7 @@
},
"maintenanceWindow": {
"$ref": "MaintenanceWindow",
- "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time."
+ "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type."
},
"metadataIntegration": {
"$ref": "MetadataIntegration",
diff --git a/googleapiclient/discovery_cache/documents/metastore.v1beta.json b/googleapiclient/discovery_cache/documents/metastore.v1beta.json
index 768bfdb74..24db20aab 100644
--- a/googleapiclient/discovery_cache/documents/metastore.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/metastore.v1beta.json
@@ -986,7 +986,7 @@
}
}
},
- "revision": "20210908",
+ "revision": "20210921",
"rootUrl": "https://metastore.googleapis.com/",
"schemas": {
"AuditConfig": {
@@ -1933,7 +1933,7 @@
},
"maintenanceWindow": {
"$ref": "MaintenanceWindow",
- "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time."
+ "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type."
},
"metadataIntegration": {
"$ref": "MetadataIntegration",
diff --git a/googleapiclient/discovery_cache/documents/monitoring.v1.json b/googleapiclient/discovery_cache/documents/monitoring.v1.json
index a7bb2acc9..a5f4a4ad7 100644
--- a/googleapiclient/discovery_cache/documents/monitoring.v1.json
+++ b/googleapiclient/discovery_cache/documents/monitoring.v1.json
@@ -441,7 +441,7 @@
}
}
},
- "revision": "20210830",
+ "revision": "20210922",
"rootUrl": "https://monitoring.googleapis.com/",
"schemas": {
"Aggregation": {
@@ -1293,6 +1293,44 @@
},
"type": "object"
},
+ "TableDataSet": {
+ "description": "Groups a time series query definition with table options.",
+ "id": "TableDataSet",
+ "properties": {
+ "minAlignmentPeriod": {
+ "description": "Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.",
+ "format": "google-duration",
+ "type": "string"
+ },
+ "tableDisplayOptions": {
+ "$ref": "TableDisplayOptions",
+ "description": "Optional. Table display options for configuring how the table is rendered."
+ },
+ "tableTemplate": {
+ "description": "Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. \"${resource.labels.project_id}.\"",
+ "type": "string"
+ },
+ "timeSeriesQuery": {
+ "$ref": "TimeSeriesQuery",
+ "description": "Required. Fields for querying time series data from the Stackdriver metrics API."
+ }
+ },
+ "type": "object"
+ },
+ "TableDisplayOptions": {
+ "description": "Table display options that can be reused.",
+ "id": "TableDisplayOptions",
+ "properties": {
+ "shownColumns": {
+ "description": "Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
"Text": {
"description": "A widget that displays textual content.",
"id": "Text",
@@ -1484,6 +1522,20 @@
},
"type": "object"
},
+ "TimeSeriesTable": {
+ "description": "A table that displays time series data.",
+ "id": "TimeSeriesTable",
+ "properties": {
+ "dataSets": {
+ "description": "Required. The data displayed in this table.",
+ "items": {
+ "$ref": "TableDataSet"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
"Type": {
"description": "A protocol buffer message type.",
"id": "Type",
@@ -1552,6 +1604,10 @@
"$ref": "Text",
"description": "A raw string or markdown displaying textual content."
},
+ "timeSeriesTable": {
+ "$ref": "TimeSeriesTable",
+ "description": "A widget that displays time series data in a tabular format."
+ },
"title": {
"description": "Optional. The title of the widget.",
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/monitoring.v3.json b/googleapiclient/discovery_cache/documents/monitoring.v3.json
index 8b3e3de81..d8f6ae5e1 100644
--- a/googleapiclient/discovery_cache/documents/monitoring.v3.json
+++ b/googleapiclient/discovery_cache/documents/monitoring.v3.json
@@ -2541,7 +2541,7 @@
}
}
},
- "revision": "20210830",
+ "revision": "20210922",
"rootUrl": "https://monitoring.googleapis.com/",
"schemas": {
"Aggregation": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json
index 166edf3de..e79780a9d 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json
@@ -530,7 +530,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://mybusinessaccountmanagement.googleapis.com/",
"schemas": {
"AcceptInvitationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json
index 0a19b4e07..a74bc281d 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json
@@ -662,7 +662,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://mybusinessbusinessinformation.googleapis.com/",
"schemas": {
"AdWordsLocationExtensions": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json
index fb5265be1..78a1f5412 100644
--- a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json
@@ -194,7 +194,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://mybusinesslodging.googleapis.com/",
"schemas": {
"Accessibility": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json
index 01adf7e9b..b57f0ffc0 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json
@@ -154,7 +154,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://mybusinessnotifications.googleapis.com/",
"schemas": {
"NotificationSetting": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json
index d616631e2..0dd51dab2 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json
@@ -281,7 +281,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://mybusinessplaceactions.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json
index f2746fd4c..6163af551 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json
@@ -256,7 +256,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://mybusinessverifications.googleapis.com/",
"schemas": {
"AddressVerificationData": {
diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json
index af491488f..04a9cbf98 100644
--- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json
+++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json
@@ -339,7 +339,7 @@
}
}
},
- "revision": "20210911",
+ "revision": "20210918",
"rootUrl": "https://ondemandscanning.googleapis.com/",
"schemas": {
"AliasContext": {
diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json
index 62b221d54..ba43f6bd5 100644
--- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json
@@ -339,7 +339,7 @@
}
}
},
- "revision": "20210911",
+ "revision": "20210918",
"rootUrl": "https://ondemandscanning.googleapis.com/",
"schemas": {
"AliasContext": {
diff --git a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json
index 40e6991cb..79f695eb3 100644
--- a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json
+++ b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json
@@ -751,7 +751,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://orgpolicy.googleapis.com/",
"schemas": {
"GoogleCloudOrgpolicyV2Constraint": {
diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1.json b/googleapiclient/discovery_cache/documents/osconfig.v1.json
index 56343fdfa..64c6958c4 100644
--- a/googleapiclient/discovery_cache/documents/osconfig.v1.json
+++ b/googleapiclient/discovery_cache/documents/osconfig.v1.json
@@ -777,7 +777,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210918",
"rootUrl": "https://osconfig.googleapis.com/",
"schemas": {
"AptSettings": {
diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json b/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json
index 5a24e990b..e35f96efc 100644
--- a/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json
@@ -686,7 +686,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210918",
"rootUrl": "https://osconfig.googleapis.com/",
"schemas": {
"CVSSv3": {
diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json
index e6ea63a9c..68b3904d0 100644
--- a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json
@@ -599,7 +599,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210918",
"rootUrl": "https://osconfig.googleapis.com/",
"schemas": {
"AptRepository": {
diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1.json b/googleapiclient/discovery_cache/documents/oslogin.v1.json
index b50d8baa8..f60286706 100644
--- a/googleapiclient/discovery_cache/documents/oslogin.v1.json
+++ b/googleapiclient/discovery_cache/documents/oslogin.v1.json
@@ -5,8 +5,14 @@
"https://www.googleapis.com/auth/cloud-platform": {
"description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account."
},
+ "https://www.googleapis.com/auth/cloud-platform.read-only": {
+ "description": "View your data across Google Cloud services and see the email address of your Google Account"
+ },
"https://www.googleapis.com/auth/compute": {
"description": "View and manage your Google Compute Engine resources"
+ },
+ "https://www.googleapis.com/auth/compute.readonly": {
+ "description": "View your Google Compute Engine resources"
}
}
}
@@ -143,7 +149,9 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"importSshPublicKey": {
@@ -306,7 +314,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://oslogin.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json
index a1240c311..94b1685ef 100644
--- a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json
@@ -374,7 +374,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://oslogin.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json
index f2c5715a8..cd643e25a 100644
--- a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json
@@ -344,7 +344,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://oslogin.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json
index 834029cc3..695da8154 100644
--- a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json
+++ b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json
@@ -193,7 +193,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210924",
"rootUrl": "https://pagespeedonline.googleapis.com/",
"schemas": {
"AuditRefs": {
diff --git a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json
index 794aae119..ccacee395 100644
--- a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json
+++ b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json
@@ -366,7 +366,7 @@
}
}
},
- "revision": "20210920",
+ "revision": "20210927",
"rootUrl": "https://paymentsresellersubscription.googleapis.com/",
"schemas": {
"GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest": {
@@ -386,6 +386,7 @@
"CANCELLATION_REASON_PAST_DUE",
"CANCELLATION_REASON_ACCOUNT_CLOSED",
"CANCELLATION_REASON_UPGRADE_DOWNGRADE",
+ "CANCELLATION_REASON_USER_DELINQUENCY",
"CANCELLATION_REASON_OTHER"
],
"enumDescriptions": [
@@ -396,6 +397,7 @@
"Payment is past due.",
"User account closed.",
"Used for notification only, do not use in Cancel API. Cancellation due to upgrade or downgrade.",
+ "Cancellation due to user delinquency",
"Other reason."
],
"type": "string"
@@ -761,6 +763,7 @@
"CANCELLATION_REASON_PAST_DUE",
"CANCELLATION_REASON_ACCOUNT_CLOSED",
"CANCELLATION_REASON_UPGRADE_DOWNGRADE",
+ "CANCELLATION_REASON_USER_DELINQUENCY",
"CANCELLATION_REASON_OTHER"
],
"enumDescriptions": [
@@ -771,6 +774,7 @@
"Payment is past due.",
"User account closed.",
"Used for notification only, do not use in Cancel API. Cancellation due to upgrade or downgrade.",
+ "Cancellation due to user delinquency",
"Other reason."
],
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/people.v1.json b/googleapiclient/discovery_cache/documents/people.v1.json
index 2b36c0b62..092fcf2a3 100644
--- a/googleapiclient/discovery_cache/documents/people.v1.json
+++ b/googleapiclient/discovery_cache/documents/people.v1.json
@@ -1172,7 +1172,7 @@
}
}
},
- "revision": "20210916",
+ "revision": "20210923",
"rootUrl": "https://people.googleapis.com/",
"schemas": {
"Address": {
diff --git a/googleapiclient/discovery_cache/documents/playablelocations.v3.json b/googleapiclient/discovery_cache/documents/playablelocations.v3.json
index 27232a087..3fc3edaed 100644
--- a/googleapiclient/discovery_cache/documents/playablelocations.v3.json
+++ b/googleapiclient/discovery_cache/documents/playablelocations.v3.json
@@ -146,7 +146,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://playablelocations.googleapis.com/",
"schemas": {
"GoogleMapsPlayablelocationsV3Impression": {
diff --git a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json
index 56ad02939..633222740 100644
--- a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json
+++ b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json
@@ -158,7 +158,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210921",
"rootUrl": "https://playcustomapp.googleapis.com/",
"schemas": {
"CustomApp": {
diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json
index f0157211e..ce364721f 100644
--- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json
+++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json
@@ -123,7 +123,7 @@
],
"parameters": {
"filter": {
- "description": "Optional. Filter expression to restrict the activities returned. Supported filters are: - service_account_last_authn.full_resource_name {=} [STRING] - service_account_key_last_authn.full_resource_name {=} [STRING]",
+ "description": "Optional. Filter expression to restrict the activities returned. For serviceAccountLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account. For serviceAccountKeyLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account key.",
"location": "query",
"type": "string"
},
@@ -163,7 +163,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210918",
"rootUrl": "https://policyanalyzer.googleapis.com/",
"schemas": {
"GoogleCloudPolicyanalyzerV1Activity": {
diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json
index 4c87892ad..7be8772e4 100644
--- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json
@@ -163,7 +163,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210918",
"rootUrl": "https://policyanalyzer.googleapis.com/",
"schemas": {
"GoogleCloudPolicyanalyzerV1beta1Activity": {
diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1.json b/googleapiclient/discovery_cache/documents/policysimulator.v1.json
index b9562cc46..8f48abd85 100644
--- a/googleapiclient/discovery_cache/documents/policysimulator.v1.json
+++ b/googleapiclient/discovery_cache/documents/policysimulator.v1.json
@@ -493,7 +493,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://policysimulator.googleapis.com/",
"schemas": {
"GoogleCloudPolicysimulatorV1AccessStateDiff": {
diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json b/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json
index e8295e65c..816fc7d4f 100644
--- a/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json
@@ -493,7 +493,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://policysimulator.googleapis.com/",
"schemas": {
"GoogleCloudPolicysimulatorV1Replay": {
diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json
index bf0d7232a..9907d1be9 100644
--- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json
+++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json
@@ -128,7 +128,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://policytroubleshooter.googleapis.com/",
"schemas": {
"GoogleCloudPolicytroubleshooterV1AccessTuple": {
diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json
index 86fc591f5..00888c8db 100644
--- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json
@@ -128,7 +128,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://policytroubleshooter.googleapis.com/",
"schemas": {
"GoogleCloudPolicytroubleshooterV1betaAccessTuple": {
diff --git a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json
index 6c8b9975f..1684ec1ce 100644
--- a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json
@@ -2484,7 +2484,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://prod-tt-sasportal.googleapis.com/",
"schemas": {
"SasPortalAssignment": {
diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1.json b/googleapiclient/discovery_cache/documents/pubsub.v1.json
index e068a742d..bf85e3410 100644
--- a/googleapiclient/discovery_cache/documents/pubsub.v1.json
+++ b/googleapiclient/discovery_cache/documents/pubsub.v1.json
@@ -1424,7 +1424,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://pubsub.googleapis.com/",
"schemas": {
"AcknowledgeRequest": {
diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json
index 29a661796..5e6004b25 100644
--- a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json
+++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json
@@ -457,7 +457,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://pubsub.googleapis.com/",
"schemas": {
"AcknowledgeRequest": {
diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json
index 33f146ba2..d060bee1c 100644
--- a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json
@@ -724,7 +724,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://pubsub.googleapis.com/",
"schemas": {
"AcknowledgeRequest": {
diff --git a/googleapiclient/discovery_cache/documents/pubsublite.v1.json b/googleapiclient/discovery_cache/documents/pubsublite.v1.json
index 4509b6977..124fd464b 100644
--- a/googleapiclient/discovery_cache/documents/pubsublite.v1.json
+++ b/googleapiclient/discovery_cache/documents/pubsublite.v1.json
@@ -1040,7 +1040,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210920",
"rootUrl": "https://pubsublite.googleapis.com/",
"schemas": {
"CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json
index 190b2e8e4..6a961b3ab 100644
--- a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json
+++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json
@@ -1140,7 +1140,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://realtimebidding.googleapis.com/",
"schemas": {
"ActivatePretargetingConfigRequest": {
diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json
index 5a8878f0d..3b898a808 100644
--- a/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json
@@ -234,7 +234,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://realtimebidding.googleapis.com/",
"schemas": {
"ActivateBiddingFunctionRequest": {
@@ -286,7 +286,7 @@
"enumDescriptions": [
"Default value that should not be used.",
"Bidding function that can be used by Authorized Buyers in the original TURTLEDOVE simulation. See documentation on the TURTLEDOVE simulation at https://developers.google.com/authorized-buyers/rtb/turtledove. The function takes in a Javascript object, `inputs`, that contains the following named fields: `openrtbContextualBidRequest` OR `googleContextualBidRequest`, `customContextualSignal`, `interestBasedBidData`, `interestGroupData`, and returns the bid price CPM. Example: ``` /* Returns a bid price CPM. * * @param {Object} inputs an object with the * following named fields: * - openrtbContextualBidRequest * OR googleContextualBidRequest * - customContextualSignal * - interestBasedBidData * - interestGroupData */ function biddingFunction(inputs) { ... return inputs.interestBasedBidData.cpm * inputs.customContextualSignals.placementMultiplier; } ```",
-        "Buyer's interest group bidding function that can be used by Authorized Buyers in the FLEDGE simulation. See the FLEDGE explainer at https://github.com/WICG/turtledove/blob/main/FLEDGE.md#32-on-device-bidding. The function takes one argument, `inputs`, that contains an object with the following named fields of the form: ``` { \"interestGroup\" : { \"ad\" : [ \"buyerCreativeId\": \"...\", # Ad creative ID \"adData\": { # JSON object } ], \"userBiddingSignals\": { . # JSON object } }, \"auctionSignals\": { \"url\": # string, \"slotVisibility\": # enum value, \"slotDimensions\": [ { \"height\": # number value \"width\": # number value } ] }, \"perBuyerSignals\": { # JSON object }, \"trustedBiddingSignals\": { # JSON object }, \"browserSignals\": { \"recent_impression_ages_secs\": [ # Array of integers. Not yet populated. ] } } ``` `interestGroup`: An object containing a list of `ad` objects, which contain the following named fields: - `buyerCreativeId`: The ad creative ID string. - `adData`: Any JSON value of the bidder's choosing to contain data associated with an ad provided in `BidResponse.ad.adslot.ad_data` for the Google Authorized Buyers protocol and `BidResponse.seatbid.bid.ext.ad_data` for the OpenRTB protocol. - `userBiddingSignals`: Any JSON value of the bidder's choosing containing interest group data that corresponds to user_bidding_signals (as in FLEDGE). This field will be populated from `BidResponse.interest_group_map.user_bidding_signals` for Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. `auctionSignals`: Contains data from the seller. It corresponds to the auction signals data described in the FLEDGE proposal. It is an object containing the following named fields: - `url`: The string URL of the page with parameters removed. - `slotVisibility`: Enum of one of the following potential values: - NO_DETECTION = 0 - ABOVE_THE_FOLD = 1 - BELOW_THE_FOLD = 2 - `slotDimensions`: A list of objects containing containing width and height pairs in `width` and `height` fields, respectively, from `BidRequest.adslot.width` and `BidRequest.adslot.height` for the Google Authorized Buyers protocol and `BidRequest.imp.banner.format.w` and `BidRequest.imp.banner.format.h` for the OpenRTB protocol. `perBuyerSignals`: The contextual signals from the bid response that are populated in `BidResponse.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the OpenRTB protocol. These signals can be of any JSON format of your choosing, however, the buyer's domain name must match between: - the interest group response in `BidResponse.interest_group_map.buyer_domain` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_map.buyer_domain` for the OpenRTB protocol. - the contextual response as a key to the map in `BidResponse.interest_group_bidding.interest_group_buyers` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_bidding.interest_group_buyers` for the OpenRTB protocol. In other words, there must be a match between the buyer domain of the contextual per_buyer_signals and the domain of an interest group. `trustedBiddingSignals`: The trusted bidding signals that corresponds to the trusted_bidding_signals in the FLEDGE proposal. It is provided in the interest group response as `BidResponse.interest_group_map.user_bidding_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. This field can be any JSON format of your choosing. `browserSignals`: An object of simulated browser-provider signals. It is an object with a single named field, `recent_impression_ages_secs`, that contains a list of estimated number value recent impression ages in seconds for a given interest group. `recent_impression_ages_secs` is not yet populated. The function returns the string creative ID of the selected ad, the bid price CPM, and (optionally) selected product IDs. In addition, the bidding function may populate an optional string debug token that may be useful for remote debugging of a bidding function performing unexpectedly. This debug string is available in `BidResponseFeedback` (https://developers.google.com/authorized-buyers/rtb/realtime-bidding-guide#bidresponsefeedback-object) and BidFeedback (https://developers.google.com/authorized-buyers/rtb/openrtb-guide#bidfeedback), for the Google protocol and openRTB protocol respectively. Example: ``` function biddingFunction(inputs) { ... return { \"buyerCreativeId\": \"ad_creative_id_1\", \"bidPriceCpm\": 0.3, \"productIds\": [\"product_id_1\", \"product_id_2\", \"product_id_3\"] \"debugString\": \"Bidding function executed successfully!\" } } ```"
+        "Buyer's interest group bidding function that can be used by Authorized Buyers in the FLEDGE simulation. See the FLEDGE explainer at https://github.com/WICG/turtledove/blob/main/FLEDGE.md#32-on-device-bidding. The function takes one argument, `inputs`, that contains an object with the following named fields of the form: ``` { \"interestGroup\" : { \"ad\" : [ \"buyerCreativeId\": \"...\", # Ad creative ID \"adData\": { # JSON object } ], \"userBiddingSignals\": { . # JSON object } }, \"auctionSignals\": { \"url\": # string, \"slotVisibility\": # enum value, \"slotDimensions\": [ { \"height\": # number value \"width\": # number value } ] }, \"perBuyerSignals\": { # JSON object }, \"trustedBiddingSignals\": { # JSON object }, \"browserSignals\": { \"recent_impression_ages_secs\": [ # Array of integers. Not yet populated. ] } } ``` `interestGroup`: An object containing a list of `ad` objects, which contain the following named fields: - `buyerCreativeId`: The ad creative ID string. - `adData`: Any JSON value of the bidder's choosing to contain data associated with an ad provided in `BidResponse.ad.adslot.ad_data` for the Google Authorized Buyers protocol and `BidResponse.seatbid.bid.ext.ad_data` for the OpenRTB protocol. - `userBiddingSignals`: Any JSON value of the bidder's choosing containing interest group data that corresponds to user_bidding_signals (as in FLEDGE). This field will be populated from `BidResponse.interest_group_map.user_bidding_signals` for Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. `auctionSignals`: Contains data from the seller. It corresponds to the auction signals data described in the FLEDGE proposal. It is an object containing the following named fields: - `url`: The string URL of the page with parameters removed. - `slotVisibility`: Enum of one of the following potential values: - NO_DETECTION = 0 - ABOVE_THE_FOLD = 1 - BELOW_THE_FOLD = 2 - `slotDimensions`: A list of objects containing containing width and height pairs in `width` and `height` fields, respectively, from `BidRequest.adslot.width` and `BidRequest.adslot.height` for the Google Authorized Buyers protocol and `BidRequest.imp.banner.format.w` and `BidRequest.imp.banner.format.h` for the OpenRTB protocol. `perBuyerSignals`: The contextual signals from the bid response that are populated in `BidResponse.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the OpenRTB protocol. These signals can be of any JSON format of your choosing, however, the buyer's domain name must match between: - the interest group response in `BidResponse.interest_group_map.buyer_domain` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_map.buyer_domain` for the OpenRTB protocol. - the contextual response as a key to the map in `BidResponse.interest_group_bidding.interest_group_buyers` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_bidding.interest_group_buyers` for the OpenRTB protocol. In other words, there must be a match between the buyer domain of the contextual per_buyer_signals and the domain of an interest group. `trustedBiddingSignals`: The trusted bidding signals that corresponds to the trusted_bidding_signals in the FLEDGE proposal. It is provided in the interest group response as `BidResponse.interest_group_map.user_bidding_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. This field can be any JSON format of your choosing. `browserSignals`: An object of simulated browser-provider signals. It is an object with a single named field, `recent_impression_ages_secs`, that contains a list of estimated number value recent impression ages in seconds for a given interest group. `recent_impression_ages_secs` is not yet populated. The function returns the string creative ID of the selected ad, the bid price CPM, and (optionally) selected product IDs. In addition, the bidding function may populate an optional debug string that may be used for remote debugging and troubleshooting of a bidder-provided bidding function. The debug string should not contain a user identifier. The maximum length of the debug string is 200 bytes. This debug string is available in `BidResponseFeedback` (https://developers.google.com/authorized-buyers/rtb/realtime-bidding-guide#bidresponsefeedback-object) and `BidFeedback` (https://developers.google.com/authorized-buyers/rtb/openrtb-guide#bidfeedback), for the Google protocol and OpenRTB protocol respectively. In addition, the debug string can be inserted into the creative HTML snippet via macro substitution if the following string is included in the snippet: \u201c%%DEBUG_STRING%%\u201d. Please ensure the debug string complies with [Platform Program Policies](https://support.google.com/platformspolicy/answer/3013851). Sample Bidding Function: ``` function biddingFunction(inputs) { ... return { \"buyerCreativeId\": \"ad_creative_id_1\", \"bidPriceCpm\": 0.3, \"productIds\": [\"product_id_1\", \"product_id_2\", \"product_id_3\"] \"debugString\": \"Bidding function executed successfully!\" } } ```"
],
"type": "string"
}
diff --git a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json
index 429bdfea3..13544e2ea 100644
--- a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json
+++ b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json
@@ -375,7 +375,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://recaptchaenterprise.googleapis.com/",
"schemas": {
"GoogleCloudRecaptchaenterpriseV1AndroidKeySettings": {
diff --git a/googleapiclient/discovery_cache/documents/recommender.v1.json b/googleapiclient/discovery_cache/documents/recommender.v1.json
index 3bee0ea94..78743aa2b 100644
--- a/googleapiclient/discovery_cache/documents/recommender.v1.json
+++ b/googleapiclient/discovery_cache/documents/recommender.v1.json
@@ -1178,7 +1178,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://recommender.googleapis.com/",
"schemas": {
"GoogleCloudRecommenderV1CostProjection": {
diff --git a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json
index 34f99eb1d..4eab729f5 100644
--- a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json
@@ -1178,7 +1178,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://recommender.googleapis.com/",
"schemas": {
"GoogleCloudRecommenderV1beta1CostProjection": {
diff --git a/googleapiclient/discovery_cache/documents/reseller.v1.json b/googleapiclient/discovery_cache/documents/reseller.v1.json
index b922aa1ed..ec1c63314 100644
--- a/googleapiclient/discovery_cache/documents/reseller.v1.json
+++ b/googleapiclient/discovery_cache/documents/reseller.v1.json
@@ -631,7 +631,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210921",
"rootUrl": "https://reseller.googleapis.com/",
"schemas": {
"Address": {
diff --git a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json
index 19defd9fb..166977e38 100644
--- a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json
+++ b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json
@@ -499,7 +499,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://resourcesettings.googleapis.com/",
"schemas": {
"GoogleCloudResourcesettingsV1ListSettingsResponse": {
diff --git a/googleapiclient/discovery_cache/documents/retail.v2.json b/googleapiclient/discovery_cache/documents/retail.v2.json
index 437c36b28..d7c76af89 100644
--- a/googleapiclient/discovery_cache/documents/retail.v2.json
+++ b/googleapiclient/discovery_cache/documents/retail.v2.json
@@ -527,7 +527,7 @@
"type": "boolean"
},
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$",
"required": true,
@@ -589,7 +589,7 @@
],
"parameters": {
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$",
"required": true,
@@ -1007,7 +1007,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210924",
"rootUrl": "https://retail.googleapis.com/",
"schemas": {
"GoogleApiHttpBody": {
@@ -1282,7 +1282,7 @@
"type": "array"
},
"colors": {
- "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).",
+ "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).",
"items": {
"type": "string"
},
@@ -1981,7 +1981,7 @@
"type": "array"
},
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"type": "string"
},
"patterns": {
@@ -2924,6 +2924,12 @@
"properties": {},
"type": "object"
},
+ "GoogleCloudRetailV2alphaEnrollSolutionMetadata": {
+ "description": "Metadata related to the EnrollSolution method. This will be returned by the google.longrunning.Operation.metadata field.",
+ "id": "GoogleCloudRetailV2alphaEnrollSolutionMetadata",
+ "properties": {},
+ "type": "object"
+ },
"GoogleCloudRetailV2alphaExportErrorsConfig": {
"description": "Configuration of destination for Export related errors.",
"id": "GoogleCloudRetailV2alphaExportErrorsConfig",
diff --git a/googleapiclient/discovery_cache/documents/retail.v2alpha.json b/googleapiclient/discovery_cache/documents/retail.v2alpha.json
index a118f6bcb..bd9a074b5 100644
--- a/googleapiclient/discovery_cache/documents/retail.v2alpha.json
+++ b/googleapiclient/discovery_cache/documents/retail.v2alpha.json
@@ -532,7 +532,7 @@
"type": "boolean"
},
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$",
"required": true,
@@ -594,7 +594,7 @@
],
"parameters": {
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$",
"required": true,
@@ -1012,7 +1012,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210924",
"rootUrl": "https://retail.googleapis.com/",
"schemas": {
"GoogleApiHttpBody": {
@@ -1480,7 +1480,7 @@
"type": "array"
},
"colors": {
- "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).",
+ "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).",
"items": {
"type": "string"
},
@@ -1604,6 +1604,12 @@
},
"type": "object"
},
+ "GoogleCloudRetailV2alphaEnrollSolutionMetadata": {
+ "description": "Metadata related to the EnrollSolution method. This will be returned by the google.longrunning.Operation.metadata field.",
+ "id": "GoogleCloudRetailV2alphaEnrollSolutionMetadata",
+ "properties": {},
+ "type": "object"
+ },
"GoogleCloudRetailV2alphaExportErrorsConfig": {
"description": "Configuration of destination for Export related errors.",
"id": "GoogleCloudRetailV2alphaExportErrorsConfig",
@@ -2285,7 +2291,7 @@
"type": "array"
},
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"type": "string"
},
"patterns": {
diff --git a/googleapiclient/discovery_cache/documents/retail.v2beta.json b/googleapiclient/discovery_cache/documents/retail.v2beta.json
index 7a3594b45..2d5f4fdc8 100644
--- a/googleapiclient/discovery_cache/documents/retail.v2beta.json
+++ b/googleapiclient/discovery_cache/documents/retail.v2beta.json
@@ -527,7 +527,7 @@
"type": "boolean"
},
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$",
"required": true,
@@ -589,7 +589,7 @@
],
"parameters": {
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$",
"required": true,
@@ -1007,7 +1007,7 @@
}
}
},
- "revision": "20210909",
+ "revision": "20210924",
"rootUrl": "https://retail.googleapis.com/",
"schemas": {
"GoogleApiHttpBody": {
@@ -1361,6 +1361,12 @@
"properties": {},
"type": "object"
},
+ "GoogleCloudRetailV2alphaEnrollSolutionMetadata": {
+ "description": "Metadata related to the EnrollSolution method. This will be returned by the google.longrunning.Operation.metadata field.",
+ "id": "GoogleCloudRetailV2alphaEnrollSolutionMetadata",
+ "properties": {},
+ "type": "object"
+ },
"GoogleCloudRetailV2alphaExportErrorsConfig": {
"description": "Configuration of destination for Export related errors.",
"id": "GoogleCloudRetailV2alphaExportErrorsConfig",
@@ -1724,7 +1730,7 @@
"type": "array"
},
"colors": {
- "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).",
+ "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).",
"items": {
"type": "string"
},
@@ -2487,7 +2493,7 @@
"type": "array"
},
"name": {
- "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".",
+ "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.",
"type": "string"
},
"patterns": {
diff --git a/googleapiclient/discovery_cache/documents/run.v1.json b/googleapiclient/discovery_cache/documents/run.v1.json
index a6c67e97b..85171ff19 100644
--- a/googleapiclient/discovery_cache/documents/run.v1.json
+++ b/googleapiclient/discovery_cache/documents/run.v1.json
@@ -1736,7 +1736,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://run.googleapis.com/",
"schemas": {
"Addressable": {
diff --git a/googleapiclient/discovery_cache/documents/run.v1alpha1.json b/googleapiclient/discovery_cache/documents/run.v1alpha1.json
index e897cde5c..4e43bbb66 100644
--- a/googleapiclient/discovery_cache/documents/run.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/run.v1alpha1.json
@@ -268,7 +268,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://run.googleapis.com/",
"schemas": {
"ConfigMapEnvSource": {
diff --git a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json
index d010e02a1..5d9d69142 100644
--- a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json
+++ b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json
@@ -210,7 +210,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210920",
"rootUrl": "https://runtimeconfig.googleapis.com/",
"schemas": {
"CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json b/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json
index 06f593b6a..6ecc0c5f7 100644
--- a/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json
@@ -805,7 +805,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210925",
"rootUrl": "https://runtimeconfig.googleapis.com/",
"schemas": {
"Binding": {
diff --git a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json
index 4295ff741..9d9514b45 100644
--- a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json
+++ b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json
@@ -261,7 +261,7 @@
}
}
},
- "revision": "20210914",
+ "revision": "20210924",
"rootUrl": "https://safebrowsing.googleapis.com/",
"schemas": {
"GoogleProtobufEmpty": {
diff --git a/googleapiclient/discovery_cache/documents/secretmanager.v1.json b/googleapiclient/discovery_cache/documents/secretmanager.v1.json
index 5a705c0cc..3f512f58f 100644
--- a/googleapiclient/discovery_cache/documents/secretmanager.v1.json
+++ b/googleapiclient/discovery_cache/documents/secretmanager.v1.json
@@ -643,7 +643,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://secretmanager.googleapis.com/",
"schemas": {
"AccessSecretVersionResponse": {
diff --git a/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json b/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json
index 712a5ac1e..55ffbc70b 100644
--- a/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json
@@ -628,7 +628,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://secretmanager.googleapis.com/",
"schemas": {
"AccessSecretVersionResponse": {
diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json
index 6fe3c6ead..86124a26c 100644
--- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json
+++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json
@@ -1816,7 +1816,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210923",
"rootUrl": "https://securitycenter.googleapis.com/",
"schemas": {
"Asset": {
diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json
index b0b7b0e29..dd78dd6b4 100644
--- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json
@@ -896,7 +896,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210923",
"rootUrl": "https://securitycenter.googleapis.com/",
"schemas": {
"Asset": {
diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json
index 1dc6e4c11..8a67a432e 100644
--- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json
@@ -1328,7 +1328,7 @@
}
}
},
- "revision": "20210917",
+ "revision": "20210923",
"rootUrl": "https://securitycenter.googleapis.com/",
"schemas": {
"Config": {
diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json
index 115d47d67..cf84bebb6 100644
--- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json
@@ -542,7 +542,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://serviceconsumermanagement.googleapis.com/",
"schemas": {
"AddTenantProjectRequest": {
diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json
index ace234840..f1c04f9c2 100644
--- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json
@@ -500,7 +500,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://serviceconsumermanagement.googleapis.com/",
"schemas": {
"Api": {
diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json
index 8c8ed08ea..c19946010 100644
--- a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json
+++ b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json
@@ -197,7 +197,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210916",
"rootUrl": "https://servicecontrol.googleapis.com/",
"schemas": {
"AllocateInfo": {
diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json
index ff0de412a..0c584cd06 100644
--- a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json
+++ b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json
@@ -169,7 +169,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210916",
"rootUrl": "https://servicecontrol.googleapis.com/",
"schemas": {
"Api": {
diff --git a/googleapiclient/discovery_cache/documents/servicedirectory.v1.json b/googleapiclient/discovery_cache/documents/servicedirectory.v1.json
index d413161a0..ff0e52a1c 100644
--- a/googleapiclient/discovery_cache/documents/servicedirectory.v1.json
+++ b/googleapiclient/discovery_cache/documents/servicedirectory.v1.json
@@ -883,7 +883,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210914",
"rootUrl": "https://servicedirectory.googleapis.com/",
"schemas": {
"Binding": {
diff --git a/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json b/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json
index 5089c8b2c..aace64f1a 100644
--- a/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json
@@ -883,7 +883,7 @@
}
}
},
- "revision": "20210907",
+ "revision": "20210914",
"rootUrl": "https://servicedirectory.googleapis.com/",
"schemas": {
"Binding": {
diff --git a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json
index beb997f45..d320cc7c3 100644
--- a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json
@@ -829,7 +829,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210917",
"rootUrl": "https://servicemanagement.googleapis.com/",
"schemas": {
"Advice": {
diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json
index fabf57014..f0603b62b 100644
--- a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json
+++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json
@@ -860,7 +860,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://servicenetworking.googleapis.com/",
"schemas": {
"AddDnsRecordSetMetadata": {
@@ -987,6 +987,10 @@
"format": "int32",
"type": "integer"
},
+ "outsideAllocationPublicIpRange": {
+ "description": "Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this primary IP range.",
+ "type": "string"
+ },
"privateIpv6GoogleAccess": {
"description": "Optional. The private IPv6 google access type for the VMs in this subnet. For information about the access types that can be set using this field, see [subnetwork](https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks) in the Compute API documentation.",
"type": "string"
@@ -2822,6 +2826,10 @@
"format": "int32",
"type": "integer"
},
+ "outsideAllocationPublicIpRange": {
+ "description": "Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this secondary IP range.",
+ "type": "string"
+ },
"rangeName": {
"description": "Required. A name for the secondary IP range. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.",
"type": "string"
diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json
index ef2f28d0c..354e2612d 100644
--- a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json
@@ -307,7 +307,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://servicenetworking.googleapis.com/",
"schemas": {
"AddDnsRecordSetMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1.json
index c15b17b8a..6086f17d4 100644
--- a/googleapiclient/discovery_cache/documents/serviceusage.v1.json
+++ b/googleapiclient/discovery_cache/documents/serviceusage.v1.json
@@ -426,7 +426,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://serviceusage.googleapis.com/",
"schemas": {
"AdminQuotaPolicy": {
diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json
index 2505b2998..15a59627d 100644
--- a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json
@@ -959,7 +959,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210924",
"rootUrl": "https://serviceusage.googleapis.com/",
"schemas": {
"AdminQuotaPolicy": {
diff --git a/googleapiclient/discovery_cache/documents/sheets.v4.json b/googleapiclient/discovery_cache/documents/sheets.v4.json
index 5845f6be7..8f8a40724 100644
--- a/googleapiclient/discovery_cache/documents/sheets.v4.json
+++ b/googleapiclient/discovery_cache/documents/sheets.v4.json
@@ -870,7 +870,7 @@
}
}
},
- "revision": "20210906",
+ "revision": "20210916",
"rootUrl": "https://sheets.googleapis.com/",
"schemas": {
"AddBandingRequest": {
diff --git a/googleapiclient/discovery_cache/documents/storage.v1.json b/googleapiclient/discovery_cache/documents/storage.v1.json
index a00c74fe2..1ad40d364 100644
--- a/googleapiclient/discovery_cache/documents/storage.v1.json
+++ b/googleapiclient/discovery_cache/documents/storage.v1.json
@@ -26,7 +26,7 @@
"description": "Stores and retrieves potentially large, immutable data objects.",
"discoveryVersion": "v1",
"documentationLink": "https://developers.google.com/storage/docs/json_api/",
- "etag": "\"353939383733353534373734383337373436\"",
+ "etag": "\"31383038373534363135323237313631333333\"",
"icons": {
"x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
"x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -3230,7 +3230,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210922",
"rootUrl": "https://storage.googleapis.com/",
"schemas": {
"Bucket": {
diff --git a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json
index 984b55c97..bd5f37f99 100644
--- a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json
+++ b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json
@@ -375,7 +375,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://streetviewpublish.googleapis.com/",
"schemas": {
"BatchDeletePhotosRequest": {
diff --git a/googleapiclient/discovery_cache/documents/sts.v1.json b/googleapiclient/discovery_cache/documents/sts.v1.json
index bae7567a1..f6737de56 100644
--- a/googleapiclient/discovery_cache/documents/sts.v1.json
+++ b/googleapiclient/discovery_cache/documents/sts.v1.json
@@ -131,7 +131,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://sts.googleapis.com/",
"schemas": {
"GoogleIamV1Binding": {
diff --git a/googleapiclient/discovery_cache/documents/sts.v1beta.json b/googleapiclient/discovery_cache/documents/sts.v1beta.json
index 45a727aab..f96db47a7 100644
--- a/googleapiclient/discovery_cache/documents/sts.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/sts.v1beta.json
@@ -116,7 +116,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://sts.googleapis.com/",
"schemas": {
"GoogleIamV1Binding": {
diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v1.json b/googleapiclient/discovery_cache/documents/tagmanager.v1.json
index 287f31989..442557fd4 100644
--- a/googleapiclient/discovery_cache/documents/tagmanager.v1.json
+++ b/googleapiclient/discovery_cache/documents/tagmanager.v1.json
@@ -1932,7 +1932,7 @@
}
}
},
- "revision": "20210915",
+ "revision": "20210922",
"rootUrl": "https://tagmanager.googleapis.com/",
"schemas": {
"Account": {
diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v2.json b/googleapiclient/discovery_cache/documents/tagmanager.v2.json
index 32bbccee3..2db4ea7b5 100644
--- a/googleapiclient/discovery_cache/documents/tagmanager.v2.json
+++ b/googleapiclient/discovery_cache/documents/tagmanager.v2.json
@@ -3317,7 +3317,7 @@
}
}
},
- "revision": "20210915",
+ "revision": "20210922",
"rootUrl": "https://tagmanager.googleapis.com/",
"schemas": {
"Account": {
diff --git a/googleapiclient/discovery_cache/documents/tasks.v1.json b/googleapiclient/discovery_cache/documents/tasks.v1.json
index 5cd3d4ee8..5175b5468 100644
--- a/googleapiclient/discovery_cache/documents/tasks.v1.json
+++ b/googleapiclient/discovery_cache/documents/tasks.v1.json
@@ -566,7 +566,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://tasks.googleapis.com/",
"schemas": {
"Task": {
diff --git a/googleapiclient/discovery_cache/documents/testing.v1.json b/googleapiclient/discovery_cache/documents/testing.v1.json
index f0602880b..4db8472a7 100644
--- a/googleapiclient/discovery_cache/documents/testing.v1.json
+++ b/googleapiclient/discovery_cache/documents/testing.v1.json
@@ -282,7 +282,7 @@
}
}
},
- "revision": "20210908",
+ "revision": "20210918",
"rootUrl": "https://testing.googleapis.com/",
"schemas": {
"Account": {
diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1.json
index bdf799f54..3e65f99d4 100644
--- a/googleapiclient/discovery_cache/documents/texttospeech.v1.json
+++ b/googleapiclient/discovery_cache/documents/texttospeech.v1.json
@@ -153,7 +153,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210917",
"rootUrl": "https://texttospeech.googleapis.com/",
"schemas": {
"AudioConfig": {
diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json
index 730aacb21..d631a51c3 100644
--- a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json
@@ -153,7 +153,7 @@
}
}
},
- "revision": "20210827",
+ "revision": "20210917",
"rootUrl": "https://texttospeech.googleapis.com/",
"schemas": {
"AudioConfig": {
diff --git a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json
index 467631e93..c74cec056 100644
--- a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json
+++ b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json
@@ -1463,7 +1463,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210927",
"rootUrl": "https://toolresults.googleapis.com/",
"schemas": {
"ANR": {
diff --git a/googleapiclient/discovery_cache/documents/vectortile.v1.json b/googleapiclient/discovery_cache/documents/vectortile.v1.json
index b6d8ef1b4..7db2bd449 100644
--- a/googleapiclient/discovery_cache/documents/vectortile.v1.json
+++ b/googleapiclient/discovery_cache/documents/vectortile.v1.json
@@ -343,7 +343,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://vectortile.googleapis.com/",
"schemas": {
"Area": {
diff --git a/googleapiclient/discovery_cache/documents/webrisk.v1.json b/googleapiclient/discovery_cache/documents/webrisk.v1.json
index 09bb0eac7..022477284 100644
--- a/googleapiclient/discovery_cache/documents/webrisk.v1.json
+++ b/googleapiclient/discovery_cache/documents/webrisk.v1.json
@@ -446,7 +446,7 @@
}
}
},
- "revision": "20210910",
+ "revision": "20210918",
"rootUrl": "https://webrisk.googleapis.com/",
"schemas": {
"GoogleCloudWebriskV1ComputeThreatListDiffResponse": {
diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json
index d935d30d7..1fd8f52a1 100644
--- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json
+++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json
@@ -526,7 +526,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://websecurityscanner.googleapis.com/",
"schemas": {
"Authentication": {
diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json
index 5bda6340a..404d4e00a 100644
--- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json
@@ -526,7 +526,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://websecurityscanner.googleapis.com/",
"schemas": {
"Authentication": {
diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json
index 3afc0a977..f06efb79a 100644
--- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json
@@ -526,7 +526,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210924",
"rootUrl": "https://websecurityscanner.googleapis.com/",
"schemas": {
"Authentication": {
diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json
index ce9f56324..bbe4229f9 100644
--- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json
+++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json
@@ -269,7 +269,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210920",
"rootUrl": "https://workflowexecutions.googleapis.com/",
"schemas": {
"CancelExecutionRequest": {
diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json
index 0d4e2d3cd..dbcd40ca3 100644
--- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json
@@ -269,7 +269,7 @@
}
}
},
- "revision": "20210913",
+ "revision": "20210920",
"rootUrl": "https://workflowexecutions.googleapis.com/",
"schemas": {
"CancelExecutionRequest": {
diff --git a/googleapiclient/discovery_cache/documents/workflows.v1.json b/googleapiclient/discovery_cache/documents/workflows.v1.json
index 3ec080386..a6149c066 100644
--- a/googleapiclient/discovery_cache/documents/workflows.v1.json
+++ b/googleapiclient/discovery_cache/documents/workflows.v1.json
@@ -444,7 +444,7 @@
}
}
},
- "revision": "20210908",
+ "revision": "20210915",
"rootUrl": "https://workflows.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/workflows.v1beta.json b/googleapiclient/discovery_cache/documents/workflows.v1beta.json
index dc440acb5..702f465d8 100644
--- a/googleapiclient/discovery_cache/documents/workflows.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/workflows.v1beta.json
@@ -444,7 +444,7 @@
}
}
},
- "revision": "20210908",
+ "revision": "20210915",
"rootUrl": "https://workflows.googleapis.com/",
"schemas": {
"Empty": {
diff --git a/googleapiclient/discovery_cache/documents/youtube.v3.json b/googleapiclient/discovery_cache/documents/youtube.v3.json
index 19601b4ff..45b615313 100644
--- a/googleapiclient/discovery_cache/documents/youtube.v3.json
+++ b/googleapiclient/discovery_cache/documents/youtube.v3.json
@@ -3789,7 +3789,7 @@
}
}
},
- "revision": "20210919",
+ "revision": "20210925",
"rootUrl": "https://youtube.googleapis.com/",
"schemas": {
"AbuseReport": {
diff --git a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json
index c5f730252..b573a7061 100644
--- a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json
+++ b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json
@@ -421,7 +421,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://youtubeanalytics.googleapis.com/",
"schemas": {
"EmptyResponse": {
diff --git a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json
index 8a292c798..8676f039c 100644
--- a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json
+++ b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json
@@ -411,7 +411,7 @@
}
}
},
- "revision": "20210918",
+ "revision": "20210925",
"rootUrl": "https://youtubereporting.googleapis.com/",
"schemas": {
"Empty": {