aboutsummaryrefslogtreecommitdiff
path: root/google/cloud/vision
diff options
context:
space:
mode:
Diffstat (limited to 'google/cloud/vision')
-rw-r--r--google/cloud/vision/BUILD.bazel1
-rw-r--r--google/cloud/vision/artman_vision_v1.yaml37
-rw-r--r--google/cloud/vision/artman_vision_v1p1beta1.yaml36
-rw-r--r--google/cloud/vision/artman_vision_v1p2beta1.yaml36
-rw-r--r--google/cloud/vision/artman_vision_v1p3beta1.yaml41
-rw-r--r--google/cloud/vision/artman_vision_v1p4beta1.yaml34
-rw-r--r--google/cloud/vision/v1/BUILD.bazel158
-rw-r--r--google/cloud/vision/v1/geometry.proto71
-rw-r--r--google/cloud/vision/v1/image_annotator.proto806
-rw-r--r--google/cloud/vision/v1/product_search.proto97
-rw-r--r--google/cloud/vision/v1/product_search_service.proto849
-rw-r--r--google/cloud/vision/v1/text_annotation.proto261
-rw-r--r--google/cloud/vision/v1/vision_gapic.yaml457
-rw-r--r--google/cloud/vision/v1/web_detection.proto107
-rw-r--r--google/cloud/vision/v1p1beta1/BUILD.bazel153
-rw-r--r--google/cloud/vision/v1p1beta1/geometry.proto53
-rw-r--r--google/cloud/vision/v1p1beta1/image_annotator.proto592
-rw-r--r--google/cloud/vision/v1p1beta1/text_annotation.proto252
-rw-r--r--google/cloud/vision/v1p1beta1/vision_gapic.yaml55
-rw-r--r--google/cloud/vision/v1p1beta1/web_detection.proto104
-rw-r--r--google/cloud/vision/v1p2beta1/BUILD.bazel154
-rw-r--r--google/cloud/vision/v1p2beta1/geometry.proto67
-rw-r--r--google/cloud/vision/v1p2beta1/image_annotator.proto765
-rw-r--r--google/cloud/vision/v1p2beta1/text_annotation.proto259
-rw-r--r--google/cloud/vision/v1p2beta1/vision_gapic.yaml72
-rw-r--r--google/cloud/vision/v1p2beta1/web_detection.proto104
-rw-r--r--google/cloud/vision/v1p3beta1/BUILD.bazel158
-rw-r--r--google/cloud/vision/v1p3beta1/geometry.proto75
-rw-r--r--google/cloud/vision/v1p3beta1/image_annotator.proto803
-rw-r--r--google/cloud/vision/v1p3beta1/product_search.proto162
-rw-r--r--google/cloud/vision/v1p3beta1/product_search_service.proto842
-rw-r--r--google/cloud/vision/v1p3beta1/text_annotation.proto259
-rw-r--r--google/cloud/vision/v1p3beta1/vision_gapic.yaml444
-rw-r--r--google/cloud/vision/v1p3beta1/web_detection.proto104
-rw-r--r--google/cloud/vision/v1p4beta1/BUILD.bazel158
-rw-r--r--google/cloud/vision/v1p4beta1/geometry.proto71
-rw-r--r--google/cloud/vision/v1p4beta1/image_annotator.proto905
-rw-r--r--google/cloud/vision/v1p4beta1/product_search.proto98
-rw-r--r--google/cloud/vision/v1p4beta1/product_search_service.proto852
-rw-r--r--google/cloud/vision/v1p4beta1/text_annotation.proto261
-rw-r--r--google/cloud/vision/v1p4beta1/vision_gapic.yaml603
-rw-r--r--google/cloud/vision/v1p4beta1/web_detection.proto107
-rw-r--r--google/cloud/vision/vision_v1.yaml37
-rw-r--r--google/cloud/vision/vision_v1p1beta1.yaml21
-rw-r--r--google/cloud/vision/vision_v1p2beta1.yaml26
-rw-r--r--google/cloud/vision/vision_v1p3beta1.yaml37
-rw-r--r--google/cloud/vision/vision_v1p4beta1.yaml39
47 files changed, 11683 insertions, 0 deletions
diff --git a/google/cloud/vision/BUILD.bazel b/google/cloud/vision/BUILD.bazel
new file mode 100644
index 000000000..a87c57fec
--- /dev/null
+++ b/google/cloud/vision/BUILD.bazel
@@ -0,0 +1 @@
+exports_files(glob(["*.yaml"]))
diff --git a/google/cloud/vision/artman_vision_v1.yaml b/google/cloud/vision/artman_vision_v1.yaml
new file mode 100644
index 000000000..6b037447e
--- /dev/null
+++ b/google/cloud/vision/artman_vision_v1.yaml
@@ -0,0 +1,37 @@
+common:
+ api_name: vision
+ api_version: v1
+ organization_name: google-cloud
+ service_yaml: vision_v1.yaml
+ gapic_yaml: v1/vision_gapic.yaml
+ src_proto_paths:
+ - v1
+ proto_deps:
+ - name: google-common-protos
+artifacts:
+- name: java_gapic
+ type: GAPIC
+ language: JAVA
+ release_level: GA
+- name: python_gapic
+ type: GAPIC
+ language: PYTHON
+ release_level: BETA
+- name: php_gapic
+ type: GAPIC
+ language: PHP
+- name: ruby_gapic
+ type: GAPIC
+ language: RUBY
+- name: go_gapic
+ type: GAPIC
+ language: GO
+- name: csharp_gapic
+ type: GAPIC
+ language: CSHARP
+- name: nodejs_gapic
+ type: GAPIC
+ language: NODEJS
+ release_level: BETA
+- name: gapic_config
+ type: GAPIC_CONFIG
diff --git a/google/cloud/vision/artman_vision_v1p1beta1.yaml b/google/cloud/vision/artman_vision_v1p1beta1.yaml
new file mode 100644
index 000000000..c1c6a3f86
--- /dev/null
+++ b/google/cloud/vision/artman_vision_v1p1beta1.yaml
@@ -0,0 +1,36 @@
+common:
+ api_name: vision
+ api_version: v1p1beta1
+ organization_name: google-cloud
+ service_yaml: vision_v1p1beta1.yaml
+ gapic_yaml: v1p1beta1/vision_gapic.yaml
+ src_proto_paths:
+ - v1p1beta1
+ proto_deps:
+ - name: google-common-protos
+artifacts:
+- name: java_gapic
+ type: GAPIC
+ language: JAVA
+- name: python_gapic
+ type: GAPIC
+ language: PYTHON
+ release_level: BETA
+- name: php_gapic
+ type: GAPIC
+ language: PHP
+- name: ruby_gapic
+ type: GAPIC
+ language: RUBY
+- name: go_gapic
+ type: GAPIC
+ language: GO
+- name: csharp_gapic
+ type: GAPIC
+ language: CSHARP
+- name: nodejs_gapic
+ type: GAPIC
+ language: NODEJS
+ release_level: BETA
+- name: gapic_config
+ type: GAPIC_CONFIG
diff --git a/google/cloud/vision/artman_vision_v1p2beta1.yaml b/google/cloud/vision/artman_vision_v1p2beta1.yaml
new file mode 100644
index 000000000..02967283c
--- /dev/null
+++ b/google/cloud/vision/artman_vision_v1p2beta1.yaml
@@ -0,0 +1,36 @@
+common:
+ api_name: vision
+ api_version: v1p2beta1
+ organization_name: google-cloud
+ service_yaml: vision_v1p2beta1.yaml
+ gapic_yaml: v1p2beta1/vision_gapic.yaml
+ src_proto_paths:
+ - v1p2beta1
+ proto_deps:
+ - name: google-common-protos
+artifacts:
+- name: java_gapic
+ type: GAPIC
+ language: JAVA
+- name: python_gapic
+ type: GAPIC
+ language: PYTHON
+ release_level: BETA
+- name: php_gapic
+ type: GAPIC
+ language: PHP
+- name: ruby_gapic
+ type: GAPIC
+ language: RUBY
+- name: go_gapic
+ type: GAPIC
+ language: GO
+- name: csharp_gapic
+ type: GAPIC
+ language: CSHARP
+- name: nodejs_gapic
+ type: GAPIC
+ language: NODEJS
+ release_level: BETA
+- name: gapic_config
+ type: GAPIC_CONFIG
diff --git a/google/cloud/vision/artman_vision_v1p3beta1.yaml b/google/cloud/vision/artman_vision_v1p3beta1.yaml
new file mode 100644
index 000000000..be0c34b0f
--- /dev/null
+++ b/google/cloud/vision/artman_vision_v1p3beta1.yaml
@@ -0,0 +1,41 @@
+common:
+ api_name: vision
+ api_version: v1p3beta1
+ organization_name: google-cloud
+ service_yaml: vision_v1p3beta1.yaml
+ gapic_yaml: v1p3beta1/vision_gapic.yaml
+ src_proto_paths:
+ - v1p3beta1
+ proto_deps:
+ - name: google-common-protos
+artifacts:
+- name: java_gapic
+ type: GAPIC
+ language: JAVA
+- name: python_gapic
+ type: GAPIC
+ language: PYTHON
+ release_level: BETA
+ package_version:
+ grpc_dep_lower_bound: 0.90.3
+ grpc_dep_upper_bound: 0.91dev
+- name: php_gapic
+ type: GAPIC
+ language: PHP
+- name: ruby_gapic
+ type: GAPIC
+ language: RUBY
+- name: go_gapic
+ type: GAPIC
+ language: GO
+- name: csharp_gapic
+ type: GAPIC
+ language: CSHARP
+- name: nodejs_gapic
+ type: GAPIC
+ language: NODEJS
+ release_level: BETA
+ package_version:
+ grpc_dep_lower_bound: 0.12.0
+- name: gapic_config
+ type: GAPIC_CONFIG
diff --git a/google/cloud/vision/artman_vision_v1p4beta1.yaml b/google/cloud/vision/artman_vision_v1p4beta1.yaml
new file mode 100644
index 000000000..b7e00b3a0
--- /dev/null
+++ b/google/cloud/vision/artman_vision_v1p4beta1.yaml
@@ -0,0 +1,34 @@
+common:
+ api_name: vision
+ api_version: v1p4beta1
+ organization_name: google-cloud
+ proto_deps:
+ - name: google-common-protos
+ src_proto_paths:
+ - v1p4beta1
+ service_yaml: vision_v1p4beta1.yaml
+ gapic_yaml: v1p4beta1/vision_gapic.yaml
+artifacts:
+- name: gapic_config
+ type: GAPIC_CONFIG
+- name: java_gapic
+ type: GAPIC
+ language: JAVA
+- name: python_gapic
+ type: GAPIC
+ language: PYTHON
+- name: nodejs_gapic
+ type: GAPIC
+ language: NODEJS
+- name: php_gapic
+ type: GAPIC
+ language: PHP
+- name: go_gapic
+ type: GAPIC
+ language: GO
+- name: ruby_gapic
+ type: GAPIC
+ language: RUBY
+- name: csharp_gapic
+ type: GAPIC
+ language: CSHARP
diff --git a/google/cloud/vision/v1/BUILD.bazel b/google/cloud/vision/v1/BUILD.bazel
new file mode 100644
index 000000000..01ec43694
--- /dev/null
+++ b/google/cloud/vision/v1/BUILD.bazel
@@ -0,0 +1,158 @@
+# This is an API workspace, having public visibility by default makes perfect sense.
+package(default_visibility = ["//visibility:public"])
+
+##############################################################################
+# Common
+##############################################################################
+load("@com_google_api_codegen//rules_gapic:gapic.bzl", "proto_library_with_info")
+
+proto_library(
+ name = "vision_proto",
+ srcs = [
+ "geometry.proto",
+ "image_annotator.proto",
+ "product_search.proto",
+ "product_search_service.proto",
+ "text_annotation.proto",
+ "web_detection.proto",
+ ],
+ deps = [
+ "//google/api:annotations_proto",
+ "//google/longrunning:operations_proto",
+ "//google/rpc:status_proto",
+ "//google/type:color_proto",
+ "//google/type:latlng_proto",
+ "@com_google_protobuf//:empty_proto",
+ "@com_google_protobuf//:field_mask_proto",
+ "@com_google_protobuf//:timestamp_proto",
+ ],
+)
+
+proto_library_with_info(
+ name = "vision_proto_with_info",
+ deps = [":vision_proto"],
+)
+
+##############################################################################
+# Java
+##############################################################################
+load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library")
+load(
+ "@com_google_api_codegen//rules_gapic/java:java_gapic.bzl",
+ "java_gapic_library",
+ "java_resource_name_proto_library",
+)
+load("@com_google_api_codegen//rules_gapic/java:java_gapic_pkg.bzl", "java_gapic_assembly_gradle_pkg")
+
+_JAVA_GRPC_DEPS = [
+ "@com_google_api_grpc_proto_google_common_protos//jar",
+]
+
+java_proto_library(
+ name = "vision_java_proto",
+ deps = [":vision_proto"],
+)
+
+java_grpc_library(
+ name = "vision_java_grpc",
+ srcs = [":vision_proto"],
+ deps = [":vision_java_proto"] + _JAVA_GRPC_DEPS,
+)
+
+java_resource_name_proto_library(
+ name = "vision_resource_name_java_proto",
+ gapic_yaml = "vision_gapic.yaml",
+ deps = [":vision_proto"],
+)
+
+java_gapic_library(
+ name = "vision_java_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ service_yaml = "//google/cloud/vision:vision_v1.yaml",
+ test_deps = [":vision_java_grpc"],
+ deps = [
+ ":vision_java_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+)
+
+[java_test(
+ name = test_name,
+ test_class = test_name,
+ runtime_deps = [
+ ":vision_java_gapic_test",
+ ],
+) for test_name in [
+ "com.google.cloud.vision.v1.ImageAnnotatorClientTest",
+]]
+
+# Opensource Packages
+java_gapic_assembly_gradle_pkg(
+ name = "google-cloud-vision-v1-java",
+ client_deps = [":vision_java_gapic"],
+ client_group = "com.google.cloud",
+ client_test_deps = [":vision_java_gapic_test"],
+ grpc_deps = [":vision_java_grpc"],
+ grpc_group = "com.google.api.grpc",
+ proto_deps = [
+ ":vision_java_proto",
+ ":vision_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+ version = "0.0.0-SNAPSHOT",
+)
+
+##############################################################################
+# Go
+##############################################################################
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic.bzl", "go_gapic_srcjar", "go_gapic_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic_pkg.bzl", "go_gapic_assembly_pkg")
+
+go_proto_library(
+ name = "vision_go_proto",
+ compilers = ["@io_bazel_rules_go//proto:go_grpc"],
+ importpath = "google.golang.org/genproto/googleapis/cloud/vision/v1",
+ protos = [":vision_proto_with_info"],
+ deps = [
+ "//google/api:annotations_go_proto",
+ "//google/longrunning:longrunning_go_proto",
+ "//google/rpc:status_go_proto",
+ "//google/type:color_go_proto",
+ "//google/type:latlng_go_proto",
+ ],
+)
+
+go_gapic_library(
+ name = "vision_go_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ importpath = "cloud.google.com/go/vision/apiv1",
+ service_yaml = "//google/cloud/vision:vision_v1.yaml",
+ deps = [
+ ":vision_go_proto",
+ "//google/longrunning:longrunning_go_gapic",
+ "//google/longrunning:longrunning_go_proto",
+ "@com_google_cloud_go//longrunning:go_default_library",
+ ],
+)
+
+go_test(
+ name = "vision_go_gapic_test",
+ srcs = [":vision_go_gapic_srcjar_test"],
+ embed = [":vision_go_gapic"],
+ importpath = "cloud.google.com/go/vision/apiv1",
+)
+
+# Opensource Packages
+go_gapic_assembly_pkg(
+ name = "gapi-cloud-vision-v1-go",
+ deps = [
+ ":vision_go_gapic",
+ ":vision_go_gapic_srcjar-smoke-test.srcjar",
+ ":vision_go_gapic_srcjar-test.srcjar",
+ ":vision_go_proto",
+ ],
+)
diff --git a/google/cloud/vision/v1/geometry.proto b/google/cloud/vision/v1/geometry.proto
new file mode 100644
index 000000000..f3ba8b773
--- /dev/null
+++ b/google/cloud/vision/v1/geometry.proto
@@ -0,0 +1,71 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "GeometryProto";
+option java_package = "com.google.cloud.vision.v1";
+option objc_class_prefix = "GCVN";
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
+
+// A bounding polygon for the detected image annotation.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+
+ // The bounding polygon normalized vertices.
+ repeated NormalizedVertex normalized_vertices = 2;
+}
+
+// A 3D position in the image, used primarily for Face detection landmarks.
+// A valid Position must have both x and y coordinates.
+// The position coordinates are in the same scale as the original image.
+message Position {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+
+ // Z coordinate (or depth).
+ float z = 3;
+}
diff --git a/google/cloud/vision/v1/image_annotator.proto b/google/cloud/vision/v1/image_annotator.proto
new file mode 100644
index 000000000..abaf1bd75
--- /dev/null
+++ b/google/cloud/vision/v1/image_annotator.proto
@@ -0,0 +1,806 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1/geometry.proto";
+import "google/cloud/vision/v1/product_search.proto";
+import "google/cloud/vision/v1/text_annotation.proto";
+import "google/cloud/vision/v1/web_detection.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/type/color.proto";
+import "google/type/latlng.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ImageAnnotatorProto";
+option java_package = "com.google.cloud.vision.v1";
+option objc_class_prefix = "GCVN";
+
+// Service that performs Google Cloud Vision API detection tasks over client
+// images, such as face, landmark, logo, label, and text detection. The
+// ImageAnnotator service returns detected entities from the images.
+service ImageAnnotator {
+ // Run image detection and annotation for a batch of images.
+ rpc BatchAnnotateImages(BatchAnnotateImagesRequest)
+ returns (BatchAnnotateImagesResponse) {
+ option (google.api.http) = {
+ post: "/v1/images:annotate"
+ body: "*"
+ };
+ }
+
+ // Run asynchronous image detection and annotation for a list of generic
+ // files, such as PDF files, which may contain multiple pages and multiple
+ // images per page. Progress and results can be retrieved through the
+ // `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `OperationMetadata` (metadata).
+ // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
+ rpc AsyncBatchAnnotateFiles(AsyncBatchAnnotateFilesRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/files:asyncBatchAnnotate"
+ body: "*"
+ };
+ }
+}
+
+// The type of Google Cloud Vision API detection to perform, and the maximum
+// number of results to return for that type. Multiple `Feature` objects can
+// be specified in the `features` list.
+message Feature {
+ // Type of Google Cloud Vision API feature to be extracted.
+ enum Type {
+ // Unspecified feature type.
+ TYPE_UNSPECIFIED = 0;
+
+ // Run face detection.
+ FACE_DETECTION = 1;
+
+ // Run landmark detection.
+ LANDMARK_DETECTION = 2;
+
+ // Run logo detection.
+ LOGO_DETECTION = 3;
+
+ // Run label detection.
+ LABEL_DETECTION = 4;
+
+ // Run text detection / optical character recognition (OCR). Text detection
+ // is optimized for areas of text within a larger image; if the image is
+ // a document, use `DOCUMENT_TEXT_DETECTION` instead.
+ TEXT_DETECTION = 5;
+
+ // Run dense text document OCR. Takes precedence when both
+ // `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
+ DOCUMENT_TEXT_DETECTION = 11;
+
+ // Run Safe Search to detect potentially unsafe
+ // or undesirable content.
+ SAFE_SEARCH_DETECTION = 6;
+
+ // Compute a set of image properties, such as the
+ // image's dominant colors.
+ IMAGE_PROPERTIES = 7;
+
+ // Run crop hints.
+ CROP_HINTS = 9;
+
+ // Run web detection.
+ WEB_DETECTION = 10;
+
+ // Run Product Search.
+ PRODUCT_SEARCH = 12;
+
+ // Run localizer for object detection.
+ OBJECT_LOCALIZATION = 19;
+ }
+
+ // The feature type.
+ Type type = 1;
+
+ // Maximum number of results of this type. Does not apply to
+ // `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ int32 max_results = 2;
+
+ // Model to use for the feature.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 3;
+}
+
+// External image source (Google Cloud Storage or web URL image location).
+message ImageSource {
+ // **Use `image_uri` instead.**
+ //
+ // The Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
+ string gcs_image_uri = 1;
+
+ // The URI of the source image. Can be either:
+ //
+ // 1. A Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more
+ // info.
+ //
+ // 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
+ // HTTP/HTTPS URLs, Google cannot guarantee that the request will be
+ // completed. Your request may fail if the specified host denies the
+ // request (e.g. due to request throttling or DOS prevention), or if Google
+ // throttles requests to the site for abuse prevention. You should not
+ // depend on externally-hosted images for production applications.
+ //
+ // When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ // precedence.
+ string image_uri = 2;
+}
+
+// Client image to perform Google Cloud Vision API tasks over.
+message Image {
+ // Image content, represented as a stream of bytes.
+ // Note: As with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // Google Cloud Storage image location, or publicly-accessible image
+ // URL. If both `content` and `source` are provided for an image, `content`
+ // takes precedence and is used to perform the image annotation request.
+ ImageSource source = 2;
+}
+
+// A face annotation object contains the results of face detection.
+message FaceAnnotation {
+ // A face-specific landmark (for example, a face feature).
+ message Landmark {
+ // Face landmark (feature) type.
+ // Left and right are defined from the vantage of the viewer of the image
+ // without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ // typically, is the person's right eye.
+ enum Type {
+ // Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0;
+
+ // Left eye.
+ LEFT_EYE = 1;
+
+ // Right eye.
+ RIGHT_EYE = 2;
+
+ // Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3;
+
+ // Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4;
+
+ // Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5;
+
+ // Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6;
+
+ // Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7;
+
+ // Nose tip.
+ NOSE_TIP = 8;
+
+ // Upper lip.
+ UPPER_LIP = 9;
+
+ // Lower lip.
+ LOWER_LIP = 10;
+
+ // Mouth left.
+ MOUTH_LEFT = 11;
+
+ // Mouth right.
+ MOUTH_RIGHT = 12;
+
+ // Mouth center.
+ MOUTH_CENTER = 13;
+
+ // Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14;
+
+ // Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15;
+
+ // Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16;
+
+ // Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17;
+
+ // Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18;
+
+ // Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19;
+
+ // Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20;
+
+ // Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21;
+
+ // Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22;
+
+ // Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23;
+
+ // Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24;
+
+ // Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25;
+
+ // Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26;
+
+ // Left ear tragion.
+ LEFT_EAR_TRAGION = 27;
+
+ // Right ear tragion.
+ RIGHT_EAR_TRAGION = 28;
+
+ // Left eye pupil.
+ LEFT_EYE_PUPIL = 29;
+
+ // Right eye pupil.
+ RIGHT_EYE_PUPIL = 30;
+
+ // Forehead glabella.
+ FOREHEAD_GLABELLA = 31;
+
+ // Chin gnathion.
+ CHIN_GNATHION = 32;
+
+ // Chin left gonion.
+ CHIN_LEFT_GONION = 33;
+
+ // Chin right gonion.
+ CHIN_RIGHT_GONION = 34;
+ }
+
+ // Face landmark type.
+ Type type = 3;
+
+ // Face landmark position.
+ Position position = 4;
+ }
+
+ // The bounding polygon around the face. The coordinates of the bounding box
+ // are in the original image's scale, as returned in `ImageParams`.
+ // The bounding box is computed to "frame" the face in accordance with human
+ // expectations. It is based on the landmarker results.
+ // Note that one or more x and/or y coordinates may not be generated in the
+ // `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ // appears in the image to be annotated.
+ BoundingPoly bounding_poly = 1;
+
+ // The `fd_bounding_poly` bounding polygon is tighter than the
+ // `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ // is used to eliminate the face from any image analysis that detects the
+ // "amount of skin" visible in an image. It is not based on the
+ // landmarker results, only on the initial face detection, hence
+ // the <code>fd</code> (face detection) prefix.
+ BoundingPoly fd_bounding_poly = 2;
+
+ // Detected face landmarks.
+ repeated Landmark landmarks = 3;
+
+ // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ // of the face relative to the image vertical about the axis perpendicular to
+ // the face. Range [-180,180].
+ float roll_angle = 4;
+
+ // Yaw angle, which indicates the leftward/rightward angle that the face is
+ // pointing relative to the vertical plane perpendicular to the image. Range
+ // [-180,180].
+ float pan_angle = 5;
+
+ // Pitch angle, which indicates the upwards/downwards angle that the face is
+ // pointing relative to the image's horizontal plane. Range [-180,180].
+ float tilt_angle = 6;
+
+ // Detection confidence. Range [0, 1].
+ float detection_confidence = 7;
+
+ // Face landmarking confidence. Range [0, 1].
+ float landmarking_confidence = 8;
+
+ // Joy likelihood.
+ Likelihood joy_likelihood = 9;
+
+ // Sorrow likelihood.
+ Likelihood sorrow_likelihood = 10;
+
+ // Anger likelihood.
+ Likelihood anger_likelihood = 11;
+
+ // Surprise likelihood.
+ Likelihood surprise_likelihood = 12;
+
+ // Under-exposed likelihood.
+ Likelihood under_exposed_likelihood = 13;
+
+ // Blurred likelihood.
+ Likelihood blurred_likelihood = 14;
+
+ // Headwear likelihood.
+ Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+ // lat/long location coordinates.
+ google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+ // Name of the property.
+ string name = 1;
+
+ // Value of the property.
+ string value = 2;
+
+ // Value of numeric properties.
+ uint64 uint64_value = 3;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ string mid = 1;
+
+ // The language code for the locale in which the entity textual
+ // `description` is expressed.
+ string locale = 2;
+
+ // Entity textual description, expressed in its `locale` language.
+ string description = 3;
+
+ // Overall score of the result. Range [0, 1].
+ float score = 4;
+
+ // **Deprecated. Use `score` instead.**
+ // The accuracy of the entity detection in an image.
+ // For example, for an image in which the "Eiffel Tower" entity is detected,
+ // this field represents the confidence that there is a tower in the query
+ // image. Range [0, 1].
+ float confidence = 5 [deprecated = true];
+
+ // The relevancy of the ICA (Image Content Annotation) label to the
+ // image. For example, the relevancy of "tower" is likely higher to an image
+ // containing the detected "Eiffel Tower" than to an image containing a
+ // detected distant towering building, even though the confidence that
+ // there is a tower in each image may be the same. Range [0, 1].
+ float topicality = 6;
+
+ // Image region to which this entity belongs. Not produced
+ // for `LABEL_DETECTION` features.
+ BoundingPoly bounding_poly = 7;
+
+ // The location information for the detected entity. Multiple
+ // `LocationInfo` elements can be present because one location may
+ // indicate the location of the scene in the image, and another location
+ // may indicate the location of the place where the image was taken.
+ // Location information is usually present for landmarks.
+ repeated LocationInfo locations = 8;
+
+ // Some entities may have optional user-supplied `Property` (name/value)
+ // fields, such a score or string that qualifies the entity.
+ repeated Property properties = 9;
+}
+
+// Set of detected objects with bounding boxes.
+message LocalizedObjectAnnotation {
+ // Object ID that should align with EntityAnnotation mid.
+ string mid = 1;
+
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+
+ // Object name, expressed in its `language_code` language.
+ string name = 3;
+
+ // Score of the result. Range [0, 1].
+ float score = 4;
+
+ // Image region to which this object belongs. This must be populated.
+ BoundingPoly bounding_poly = 5;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+ // Represents the adult content likelihood for the image. Adult content may
+ // contain elements such as nudity, pornographic images or cartoons, or
+ // sexual activities.
+ Likelihood adult = 1;
+
+ // Spoof likelihood. The likelihood that an modification
+ // was made to the image's canonical version to make it appear
+ // funny or offensive.
+ Likelihood spoof = 2;
+
+ // Likelihood that this is a medical image.
+ Likelihood medical = 3;
+
+ // Likelihood that this image contains violent content.
+ Likelihood violence = 4;
+
+ // Likelihood that the request image contains racy content. Racy content may
+ // include (but is not limited to) skimpy or sheer clothing, strategically
+ // covered nudity, lewd or provocative poses, or close-ups of sensitive
+ // body areas.
+ Likelihood racy = 9;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+ // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1;
+
+ // Max lat/long pair.
+ google.type.LatLng max_lat_lng = 2;
+}
+
+// Color information consists of RGB channels, score, and the fraction of
+// the image that the color occupies in the image.
+message ColorInfo {
+ // RGB components of the color.
+ google.type.Color color = 1;
+
+ // Image-specific score for this color. Value in range [0, 1].
+ float score = 2;
+
+ // The fraction of pixels the color occupies in the image.
+ // Value in range [0, 1].
+ float pixel_fraction = 3;
+}
+
+// Set of dominant colors and their corresponding scores.
+message DominantColorsAnnotation {
+ // RGB color values with their score and pixel fraction.
+ repeated ColorInfo colors = 1;
+}
+
+// Stores image properties, such as dominant colors.
+message ImageProperties {
+ // If present, dominant colors completed successfully.
+ DominantColorsAnnotation dominant_colors = 1;
+}
+
+// Single crop hint that is used to generate a new crop when serving an image.
+message CropHint {
+ // The bounding polygon for the crop region. The coordinates of the bounding
+ // box are in the original image's scale, as returned in `ImageParams`.
+ BoundingPoly bounding_poly = 1;
+
+ // Confidence of this being a salient region. Range [0, 1].
+ float confidence = 2;
+
+ // Fraction of importance of this salient region with respect to the original
+ // image.
+ float importance_fraction = 3;
+}
+
+// Set of crop hints that are used to generate new crops when serving images.
+message CropHintsAnnotation {
+ // Crop hint results.
+ repeated CropHint crop_hints = 1;
+}
+
+// Parameters for crop hints annotation request.
+message CropHintsParams {
+ // Aspect ratios in floats, representing the ratio of the width to the height
+ // of the image. For example, if the desired aspect ratio is 4/3, the
+ // corresponding float value should be 1.33333. If not specified, the
+ // best possible crop is returned. The number of provided aspect ratios is
+ // limited to a maximum of 16; any aspect ratios provided after the 16th are
+ // ignored.
+ repeated float aspect_ratios = 1;
+}
+
+// Parameters for web detection request.
+message WebDetectionParams {
+ // Whether to include results derived from the geo information in the image.
+ bool include_geo_results = 2;
+}
+
+// Image context and/or feature-specific parameters.
+message ImageContext {
+ // Not used.
+ LatLongRect lat_long_rect = 1;
+
+ // List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ // yields the best results since it enables automatic language detection. For
+ // languages based on the Latin alphabet, setting `language_hints` is not
+ // needed. In rare cases, when the language of the text in the image is known,
+ // setting a hint will help get better results (although it will be a
+ // significant hindrance if the hint is wrong). Text detection returns an
+ // error if one or more of the specified languages is not one of the
+ // [supported languages](/vision/docs/languages).
+ repeated string language_hints = 2;
+
+ // Parameters for crop hints annotation request.
+ CropHintsParams crop_hints_params = 4;
+
+ // Parameters for product search.
+ ProductSearchParams product_search_params = 5;
+
+ // Parameters for web detection.
+ WebDetectionParams web_detection_params = 6;
+}
+
+// Request for performing Google Cloud Vision API tasks over a user-provided
+// image, with user-requested features.
+message AnnotateImageRequest {
+ // The image to be processed.
+ Image image = 1;
+
+ // Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image.
+ ImageContext image_context = 3;
+}
+
+// If an image was produced from a file (e.g. a PDF), this message gives
+// information about the source of that image.
+message ImageAnnotationContext {
+ // The URI of the file used to produce the image.
+ string uri = 1;
+
+ // If the file was a PDF or TIFF, this field gives the page number within
+ // the file used to produce the image.
+ int32 page_number = 2;
+}
+
+// Response to an image annotation request.
+message AnnotateImageResponse {
+ // If present, face detection has completed successfully.
+ repeated FaceAnnotation face_annotations = 1;
+
+ // If present, landmark detection has completed successfully.
+ repeated EntityAnnotation landmark_annotations = 2;
+
+ // If present, logo detection has completed successfully.
+ repeated EntityAnnotation logo_annotations = 3;
+
+ // If present, label detection has completed successfully.
+ repeated EntityAnnotation label_annotations = 4;
+
+ // If present, localized object detection has completed successfully.
+ // This will be sorted descending by confidence score.
+ repeated LocalizedObjectAnnotation localized_object_annotations = 22;
+
+ // If present, text (OCR) detection has completed successfully.
+ repeated EntityAnnotation text_annotations = 5;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ // This annotation provides the structural hierarchy for the OCR detected
+ // text.
+ TextAnnotation full_text_annotation = 12;
+
+ // If present, safe-search annotation has completed successfully.
+ SafeSearchAnnotation safe_search_annotation = 6;
+
+ // If present, image properties were extracted successfully.
+ ImageProperties image_properties_annotation = 8;
+
+ // If present, crop hints have completed successfully.
+ CropHintsAnnotation crop_hints_annotation = 11;
+
+ // If present, web detection has completed successfully.
+ WebDetection web_detection = 13;
+
+ // If present, product search has completed successfully.
+ ProductSearchResults product_search_results = 14;
+
+ // If set, represents the error message for the operation.
+ // Note that filled-in image annotations are guaranteed to be
+ // correct, even when `error` is set.
+ google.rpc.Status error = 9;
+
+ // If present, contextual information is needed to understand where this image
+ // comes from.
+ ImageAnnotationContext context = 21;
+}
+
+// Response to a single file annotation request. A file may contain one or more
+// images, which individually have their own responses.
+message AnnotateFileResponse {
+ // Information about the file for which this response is generated.
+ InputConfig input_config = 1;
+
+ // Individual responses to images found within the file.
+ repeated AnnotateImageResponse responses = 2;
+}
+
+// Multiple image annotation requests are batched into a single service call.
+message BatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+}
+
+// Response to a batch image annotation request.
+message BatchAnnotateImagesResponse {
+ // Individual responses to image annotation requests within the batch.
+ repeated AnnotateImageResponse responses = 1;
+}
+
+// An offline file annotation request.
+message AsyncAnnotateFileRequest {
+ // Required. Information about the input file.
+ InputConfig input_config = 1;
+
+ // Required. Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image(s) in the file.
+ ImageContext image_context = 3;
+
+ // Required. The desired output location and metadata (e.g. format).
+ OutputConfig output_config = 4;
+}
+
+// The response for a single offline file annotation request.
+message AsyncAnnotateFileResponse {
+ // The output location and metadata from AsyncAnnotateFileRequest.
+ OutputConfig output_config = 1;
+}
+
+// Multiple async file annotation requests are batched into a single service
+// call.
+message AsyncBatchAnnotateFilesRequest {
+ // Individual async file annotation requests for this batch.
+ repeated AsyncAnnotateFileRequest requests = 1;
+}
+
+// Response to an async batch file annotation request.
+message AsyncBatchAnnotateFilesResponse {
+ // The list of file annotation responses, one for each request in
+ // AsyncBatchAnnotateFilesRequest.
+ repeated AsyncAnnotateFileResponse responses = 1;
+}
+
+// The desired input location and metadata.
+message InputConfig {
+ // The Google Cloud Storage location to read the input from.
+ GcsSource gcs_source = 1;
+
+ // The type of the file. Currently only "application/pdf" and "image/tiff"
+ // are supported. Wildcards are not supported.
+ string mime_type = 2;
+}
+
+// The desired output location and metadata.
+message OutputConfig {
+ // The Google Cloud Storage location to write the output(s) to.
+ GcsDestination gcs_destination = 1;
+
+ // The max number of response protos to put into each output JSON file on
+ // Google Cloud Storage.
+ // The valid range is [1, 100]. If not specified, the default value is 20.
+ //
+ // For example, for one pdf file with 100 pages, 100 response protos will
+ // be generated. If `batch_size` = 20, then 5 json files each
+ // containing 20 response protos will be written under the prefix
+ // `gcs_destination`.`uri`.
+ //
+ // Currently, batch_size only applies to GcsDestination, with potential future
+ // support for other output configurations.
+ int32 batch_size = 2;
+}
+
+// The Google Cloud Storage location where the input will be read from.
+message GcsSource {
+ // Google Cloud Storage URI for the input file. This must only be a
+ // Google Cloud Storage object. Wildcards are not currently supported.
+ string uri = 1;
+}
+
+// The Google Cloud Storage location where the output will be written to.
+message GcsDestination {
+ // Google Cloud Storage URI where the results will be stored. Results will
+ // be in JSON format and preceded by its corresponding input URI. This field
+ // can either represent a single file, or a prefix for multiple outputs.
+ // Prefixes must end in a `/`.
+ //
+ // Examples:
+ //
+ // * File: gs://bucket-name/filename.json
+ // * Prefix: gs://bucket-name/prefix/here/
+ // * File: gs://bucket-name/prefix/here
+ //
+ // If multiple outputs, each response is still AnnotateFileResponse, each of
+ // which contains some subset of the full list of AnnotateImageResponse.
+ // Multiple outputs can happen if, for example, the output JSON is too large
+ // and overflows into multiple sharded files.
+ string uri = 1;
+}
+
+// Contains metadata for the BatchAnnotateImages operation.
+message OperationMetadata {
+ // Batch operation states.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is received.
+ CREATED = 1;
+
+ // Request is actively being processed.
+ RUNNING = 2;
+
+ // The batch processing is done.
+ DONE = 3;
+
+ // The batch processing was cancelled.
+ CANCELLED = 4;
+ }
+
+ // Current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was received.
+ google.protobuf.Timestamp create_time = 5;
+
+ // The time when the operation result was last updated.
+ google.protobuf.Timestamp update_time = 6;
+}
+
+// A bucketized representation of likelihood, which is intended to give clients
+// highly stable results across model upgrades.
+enum Likelihood {
+ // Unknown likelihood.
+ UNKNOWN = 0;
+
+ // It is very unlikely that the image belongs to the specified vertical.
+ VERY_UNLIKELY = 1;
+
+ // It is unlikely that the image belongs to the specified vertical.
+ UNLIKELY = 2;
+
+ // It is possible that the image belongs to the specified vertical.
+ POSSIBLE = 3;
+
+ // It is likely that the image belongs to the specified vertical.
+ LIKELY = 4;
+
+ // It is very likely that the image belongs to the specified vertical.
+ VERY_LIKELY = 5;
+}
diff --git a/google/cloud/vision/v1/product_search.proto b/google/cloud/vision/v1/product_search.proto
new file mode 100644
index 000000000..2b895bdbd
--- /dev/null
+++ b/google/cloud/vision/v1/product_search.proto
@@ -0,0 +1,97 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1/geometry.proto";
+import "google/cloud/vision/v1/product_search_service.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchProto";
+option java_package = "com.google.cloud.vision.v1";
+option objc_class_prefix = "GCVN";
+
+// Parameters for a product search request.
+message ProductSearchParams {
+ // The bounding polygon around the area of interest in the image.
+ // Optional. If it is not specified, system discretion will be applied.
+ BoundingPoly bounding_poly = 9;
+
+ // The resource name of a [ProductSet][google.cloud.vision.v1.ProductSet] to
+ // be searched for similar images.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ string product_set = 6;
+
+ // The list of product categories to search in. Currently, we only consider
+ // the first category, and either "homegoods", "apparel", or "toys" should be
+ // specified.
+ repeated string product_categories = 7;
+
+ // The filtering expression. This can be used to restrict search results based
+ // on Product labels. We currently support an AND of OR of key-value
+ // expressions, where each expression within an OR must have the same key.
+ //
+ // For example, "(color = red OR color = blue) AND brand = Google" is
+ // acceptable, but not "(color = red OR brand = Google)" or "color: red".
+ string filter = 8;
+}
+
+// Results for a product search request.
+message ProductSearchResults {
+ // Information about a product.
+ message Result {
+ // The Product.
+ Product product = 1;
+
+ // A confidence level on the match, ranging from 0 (no confidence) to
+ // 1 (full confidence).
+ float score = 2;
+
+ // The resource name of the image from the product that is the closest match
+ // to the query.
+ string image = 3;
+ }
+
+ // Information about the products similar to a single product in a query
+ // image.
+ message GroupedResult {
+ // The bounding polygon around the product detected in the query image.
+ BoundingPoly bounding_poly = 1;
+
+ // List of results, one for each product match.
+ repeated Result results = 2;
+ }
+
+ // Timestamp of the index which provided these results. Changes made after
+ // this time are not reflected in the current results.
+ google.protobuf.Timestamp index_time = 2;
+
+ // List of results, one for each product match.
+ repeated Result results = 5;
+
+ // List of results grouped by products detected in the query image. Each entry
+ // corresponds to one bounding polygon in the query image, and contains the
+ // matching products specific to that region. There may be duplicate product
+ // matches in the union of all the per-product results.
+ repeated GroupedResult product_grouped_results = 6;
+}
diff --git a/google/cloud/vision/v1/product_search_service.proto b/google/cloud/vision/v1/product_search_service.proto
new file mode 100644
index 000000000..5bbfebfb5
--- /dev/null
+++ b/google/cloud/vision/v1/product_search_service.proto
@@ -0,0 +1,849 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1/geometry.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchServiceProto";
+option java_package = "com.google.cloud.vision.v1";
+option objc_class_prefix = "GCVN";
+
+// Manages Products and ProductSets of reference images for use in product
+// search. It uses the following resource model:
+//
+// - The API has a collection of [ProductSet][google.cloud.vision.v1.ProductSet]
+// resources, named `projects/*/locations/*/productSets/*`, which acts as a way
+// to put different products into groups to limit identification.
+//
+// In parallel,
+//
+// - The API has a collection of [Product][google.cloud.vision.v1.Product]
+// resources, named
+// `projects/*/locations/*/products/*`
+//
+// - Each [Product][google.cloud.vision.v1.Product] has a collection of
+// [ReferenceImage][google.cloud.vision.v1.ReferenceImage] resources, named
+// `projects/*/locations/*/products/*/referenceImages/*`
+service ProductSearch {
+ // Creates and returns a new ProductSet resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing, or is longer than
+ // 4096 characters.
+ rpc CreateProductSet(CreateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/locations/*}/productSets"
+ body: "product_set"
+ };
+ }
+
+ // Lists ProductSets in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100, or less
+ // than 1.
+ rpc ListProductSets(ListProductSetsRequest)
+ returns (ListProductSetsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/locations/*}/productSets"
+ };
+ }
+
+ // Gets information associated with a ProductSet.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc GetProductSet(GetProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Makes changes to a ProductSet resource.
+ // Only display_name can be updated currently.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but
+ // missing from the request or longer than 4096 characters.
+ rpc UpdateProductSet(UpdateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ patch: "/v1/{product_set.name=projects/*/locations/*/productSets/*}"
+ body: "product_set"
+ };
+ }
+
+ // Permanently deletes a ProductSet. Products and ReferenceImages in the
+ // ProductSet are not deleted.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc DeleteProductSet(DeleteProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Creates and returns a new product resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if description is longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is missing or invalid.
+ rpc CreateProduct(CreateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/locations/*}/products"
+ body: "product"
+ };
+ }
+
+ // Lists products in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProducts(ListProductsRequest) returns (ListProductsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/locations/*}/products"
+ };
+ }
+
+ // Gets information associated with a Product.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ rpc GetProduct(GetProductRequest) returns (Product) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Makes changes to a Product resource.
+ // Only the `display_name`, `description`, and `labels` fields can be updated
+ // right now.
+ //
+ // If labels are updated, the change will not be reflected in queries until
+ // the next index time.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but is
+ // missing from the request or longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if description is present in update_mask but is
+ // longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is present in update_mask.
+ rpc UpdateProduct(UpdateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ patch: "/v1/{product.name=projects/*/locations/*/products/*}"
+ body: "product"
+ };
+ }
+
+ // Permanently deletes a product and its reference images.
+ //
+ // Metadata of the product and all its images will be deleted right away, but
+ // search queries against ProductSets containing the product may still work
+ // until all related caches are refreshed.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the product does not exist.
+ rpc DeleteProduct(DeleteProductRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Creates and returns a new ReferenceImage resource.
+ //
+ // The `bounding_poly` field is optional. If `bounding_poly` is not specified,
+ // the system will try to detect regions of interest in the image that are
+ // compatible with the product_category on the parent product. If it is
+ // specified, detection is ALWAYS skipped. The system converts polygons into
+ // non-rotated rectangles.
+ //
+ // Note that the pipeline will resize the image if the image resolution is too
+ // large to process (above 50MP).
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if the image_uri is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if the product does not exist.
+ // * Returns INVALID_ARGUMENT if bounding_poly is not provided, and nothing
+ // compatible with the parent product's product_category is detected.
+ // * Returns INVALID_ARGUMENT if bounding_poly contains more than 10 polygons.
+ rpc CreateReferenceImage(CreateReferenceImageRequest)
+ returns (ReferenceImage) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ body: "reference_image"
+ };
+ }
+
+ // Permanently deletes a reference image.
+ //
+ // The image metadata will be deleted right away, but search queries
+ // against ProductSets containing the image may still work until all related
+ // caches are refreshed.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the reference image does not exist.
+ rpc DeleteReferenceImage(DeleteReferenceImageRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Lists reference images.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the parent product does not exist.
+ // * Returns INVALID_ARGUMENT if the page_size is greater than 100, or less
+ // than 1.
+ rpc ListReferenceImages(ListReferenceImagesRequest)
+ returns (ListReferenceImagesResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ };
+ }
+
+ // Gets information associated with a ReferenceImage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the specified image does not exist.
+ rpc GetReferenceImage(GetReferenceImageRequest) returns (ReferenceImage) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Adds a Product to the specified ProductSet. If the Product is already
+ // present, no change is made.
+ //
+ // One Product can be added to at most 100 ProductSets.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product or the ProductSet doesn't exist.
+ rpc AddProductToProductSet(AddProductToProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1/{name=projects/*/locations/*/productSets/*}:addProduct"
+ body: "*"
+ };
+ }
+
+ // Removes a Product from the specified ProductSet.
+ //
+ // Possible errors:
+ //
+  // * Returns NOT_FOUND if the Product is not found under the ProductSet.
+ rpc RemoveProductFromProductSet(RemoveProductFromProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1/{name=projects/*/locations/*/productSets/*}:removeProduct"
+ body: "*"
+ };
+ }
+
+ // Lists the Products in a ProductSet, in an unspecified order. If the
+ // ProductSet does not exist, the products field of the response will be
+ // empty.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProductsInProductSet(ListProductsInProductSetRequest)
+ returns (ListProductsInProductSetResponse) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/locations/*/productSets/*}/products"
+ };
+ }
+
+ // Asynchronous API that imports a list of reference images to specified
+ // product sets based on a list of image information.
+ //
+ // The [google.longrunning.Operation][google.longrunning.Operation] API can be
+ // used to keep track of the progress and results of the request.
+ // `Operation.metadata` contains `BatchOperationMetadata`. (progress)
+ // `Operation.response` contains `ImportProductSetsResponse`. (results)
+ //
+ // The input source of this method is a csv file on Google Cloud Storage.
+ // For the format of the csv file please see
+ // [ImportProductSetsGcsSource.csv_file_uri][google.cloud.vision.v1.ImportProductSetsGcsSource.csv_file_uri].
+ rpc ImportProductSets(ImportProductSetsRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/locations/*}/productSets:import"
+ body: "*"
+ };
+ }
+}
+
+// A Product contains ReferenceImages.
+message Product {
+ // A product label represented as a key-value pair.
+ message KeyValue {
+ // The key of the label attached to the product. Cannot be empty and cannot
+ // exceed 128 bytes.
+ string key = 1;
+
+ // The value of the label attached to the product. Cannot be empty and
+ // cannot exceed 128 bytes.
+ string value = 2;
+ }
+
+ // The resource name of the product.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ //
+ // This field is ignored when creating a product.
+ string name = 1;
+
+ // The user-provided name for this Product. Must not be empty. Must be at most
+ // 4096 characters long.
+ string display_name = 2;
+
+ // User-provided metadata to be stored with this product. Must be at most 4096
+ // characters long.
+ string description = 3;
+
+ // The category for the product identified by the reference image. This should
+ // be either "homegoods", "apparel", or "toys".
+ //
+ // This field is immutable.
+ string product_category = 4;
+
+ // Key-value pairs that can be attached to a product. At query time,
+ // constraints can be specified based on the product_labels.
+ //
+ // Note that integer values can be provided as strings, e.g. "1199". Only
+ // strings with integer values can match a range-based restriction which is
+ // to be supported soon.
+ //
+ // Multiple values can be assigned to the same key. One product may have up to
+ // 100 product_labels.
+ repeated KeyValue product_labels = 5;
+}
+
+// A ProductSet contains Products. A ProductSet can contain a maximum of 1
+// million reference images. If the limit is exceeded, periodic indexing will
+// fail.
+message ProductSet {
+ // The resource name of the ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ //
+ // This field is ignored when creating a ProductSet.
+ string name = 1;
+
+ // The user-provided name for this ProductSet. Must not be empty. Must be at
+ // most 4096 characters long.
+ string display_name = 2;
+
+ // Output only. The time at which this ProductSet was last indexed. Query
+ // results will reflect all updates before this time. If this ProductSet has
+ // never been indexed, this field is 0.
+ //
+ // This field is ignored when creating a ProductSet.
+ google.protobuf.Timestamp index_time = 3;
+
+ // Output only. If there was an error with indexing the product set, the field
+ // is populated.
+ //
+ // This field is ignored when creating a ProductSet.
+ google.rpc.Status index_error = 4;
+}
+
+// A `ReferenceImage` represents a product image and its associated metadata,
+// such as bounding boxes.
+message ReferenceImage {
+ // The resource name of the reference image.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ //
+ // This field is ignored when creating a reference image.
+ string name = 1;
+
+ // The Google Cloud Storage URI of the reference image.
+ //
+ // The URI must start with `gs://`.
+ //
+ // Required.
+ string uri = 2;
+
+ // Bounding polygons around the areas of interest in the reference image.
+ // Optional. If this field is empty, the system will try to detect regions of
+ // interest. At most 10 bounding polygons will be used.
+ //
+ // The provided shape is converted into a non-rotated rectangle. Once
+ // converted, the small edge of the rectangle must be greater than or equal
+ // to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; 1:5
+ // is not).
+ repeated BoundingPoly bounding_polys = 3;
+}
+
+// Request message for the `CreateProduct` method.
+message CreateProductRequest {
+ // The project in which the Product should be created.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The product to create.
+ Product product = 2;
+
+ // A user-supplied resource id for this Product. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_id = 3;
+}
+
+// Request message for the `ListProducts` method.
+message ListProductsRequest {
+ // The project OR ProductSet from which Products should be listed.
+ //
+ // Format:
+ // `projects/PROJECT_ID/locations/LOC_ID`
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProducts` method.
+message ListProductsResponse {
+ // List of products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProduct` method.
+message GetProductRequest {
+ // Resource name of the Product to get.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProduct` method.
+message UpdateProductRequest {
+ // The Product resource which replaces the one on the server.
+ // product.name is immutable.
+ Product product = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields
+ // to update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask paths include `product_labels`, `display_name`, and
+ // `description`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProduct` method.
+message DeleteProductRequest {
+ // Resource name of product to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateProductSet` method.
+message CreateProductSetRequest {
+ // The project in which the ProductSet should be created.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The ProductSet to create.
+ ProductSet product_set = 2;
+
+ // A user-supplied resource id for this ProductSet. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_set_id = 3;
+}
+
+// Request message for the `ListProductSets` method.
+message ListProductSetsRequest {
+ // The project from which ProductSets should be listed.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductSets` method.
+message ListProductSetsResponse {
+ // List of ProductSets.
+ repeated ProductSet product_sets = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProductSet` method.
+message GetProductSetRequest {
+ // Resource name of the ProductSet to get.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProductSet` method.
+message UpdateProductSetRequest {
+ // The ProductSet resource which replaces the one on the server.
+ ProductSet product_set = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
+ // update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask path is `display_name`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProductSet` method.
+message DeleteProductSetRequest {
+ // Resource name of the ProductSet to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateReferenceImage` method.
+message CreateReferenceImageRequest {
+ // Resource name of the product in which to create the reference image.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The reference image to create.
+ // If an image ID is specified, it is ignored.
+ ReferenceImage reference_image = 2;
+
+ // A user-supplied resource id for the ReferenceImage to be added. If set,
+ // the server will attempt to use this value as the resource id. If it is
+ // already in use, an error is returned with code ALREADY_EXISTS. Must be at
+ // most 128 characters long. It cannot contain the character `/`.
+ string reference_image_id = 3;
+}
+
+// Request message for the `ListReferenceImages` method.
+message ListReferenceImagesRequest {
+ // Resource name of the product containing the reference images.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // A token identifying a page of results to be returned. This is the value
+ // of `nextPageToken` returned in a previous reference image list request.
+ //
+ // Defaults to the first page if not specified.
+ string page_token = 3;
+}
+
+// Response message for the `ListReferenceImages` method.
+message ListReferenceImagesResponse {
+ // The list of reference images.
+ repeated ReferenceImage reference_images = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string next_page_token = 3;
+}
+
+// Request message for the `GetReferenceImage` method.
+message GetReferenceImageRequest {
+ // The resource name of the ReferenceImage to get.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ string name = 1;
+}
+
+// Request message for the `DeleteReferenceImage` method.
+message DeleteReferenceImageRequest {
+ // The resource name of the reference image to delete.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`
+ string name = 1;
+}
+
+// Request message for the `AddProductToProductSet` method.
+message AddProductToProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be added to this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `RemoveProductFromProductSet` method.
+message RemoveProductFromProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be removed from this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetRequest {
+ // The ProductSet resource for which to retrieve Products.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetResponse {
+ // The list of Products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// The Google Cloud Storage location for a csv file which preserves a list of
+// ImportProductSetRequests in each line.
+message ImportProductSetsGcsSource {
+ // The Google Cloud Storage URI of the input csv file.
+ //
+ // The URI must start with `gs://`.
+ //
+ // The format of the input csv file should be one image per line.
+ // In each line, there are 8 columns.
+ //
+ // 1. image-uri
+ // 2. image-id
+ // 3. product-set-id
+ // 4. product-id
+ // 5. product-category
+ // 6. product-display-name
+ // 7. labels
+ // 8. bounding-poly
+ //
+ // The `image-uri`, `product-set-id`, `product-id`, and `product-category`
+ // columns are required. All other columns are optional.
+ //
+ // If the `ProductSet` or `Product` specified by the `product-set-id` and
+ // `product-id` values does not exist, then the system will create a new
+ // `ProductSet` or `Product` for the image. In this case, the
+ // `product-display-name` column refers to
+ // [display_name][google.cloud.vision.v1.Product.display_name], the
+ // `product-category` column refers to
+ // [product_category][google.cloud.vision.v1.Product.product_category], and
+ // the `labels` column refers to
+ // [product_labels][google.cloud.vision.v1.Product.product_labels].
+ //
+ // The `image-id` column is optional but must be unique if provided. If it is
+ // empty, the system will automatically assign a unique id to the image.
+ //
+ // The `product-display-name` column is optional. If it is empty, the system
+ // sets the [display_name][google.cloud.vision.v1.Product.display_name] field
+ // for the product to a space (" "). You can update the `display_name` later
+ // by using the API.
+ //
+ // If a `Product` with the specified `product-id` already exists, then the
+ // system ignores the `product-display-name`, `product-category`, and `labels`
+ // columns.
+ //
+ // The `labels` column (optional) is a line containing a list of
+ // comma-separated key-value pairs, in the following format:
+ //
+ // "key_1=value_1,key_2=value_2,...,key_n=value_n"
+ //
+ // The `bounding-poly` column (optional) identifies one region of
+ // interest from the image in the same manner as `CreateReferenceImage`. If
+ // you do not specify the `bounding-poly` column, then the system will try to
+ // detect regions of interest automatically.
+ //
+ // At most one `bounding-poly` column is allowed per line. If the image
+ // contains multiple regions of interest, add a line to the CSV file that
+ // includes the same product information, and the `bounding-poly` values for
+ // each region of interest.
+ //
+ // The `bounding-poly` column must contain an even number of comma-separated
+ // numbers, in the format "p1_x,p1_y,p2_x,p2_y,...,pn_x,pn_y". Use
+ // non-negative integers for absolute bounding polygons, and float values
+ // in [0, 1] for normalized bounding polygons.
+ //
+ // The system will resize the image if the image resolution is too
+ // large to process (larger than 20MP).
+ string csv_file_uri = 1;
+}
+
+// The input content for the `ImportProductSets` method.
+message ImportProductSetsInputConfig {
+ // The source of the input.
+ oneof source {
+ // The Google Cloud Storage location for a csv file which preserves a list
+ // of ImportProductSetRequests in each line.
+ ImportProductSetsGcsSource gcs_source = 1;
+ }
+}
+
+// Request message for the `ImportProductSets` method.
+message ImportProductSetsRequest {
+ // The project in which the ProductSets should be imported.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The input content for the list of requests.
+ ImportProductSetsInputConfig input_config = 2;
+}
+
+// Response message for the `ImportProductSets` method.
+//
+// This message is returned by the
+// [google.longrunning.Operations.GetOperation][google.longrunning.Operations.GetOperation]
+// method in the returned
+// [google.longrunning.Operation.response][google.longrunning.Operation.response]
+// field.
+message ImportProductSetsResponse {
+ // The list of reference_images that are imported successfully.
+ repeated ReferenceImage reference_images = 1;
+
+ // The rpc status for each ImportProductSet request, including both successes
+ // and errors.
+ //
+ // The number of statuses here matches the number of lines in the csv file,
+ // and statuses[i] stores the success or failure status of processing the i-th
+ // line of the csv, starting from line 0.
+ repeated google.rpc.Status statuses = 2;
+}
+
+// Metadata for the batch operations such as the current state.
+//
+// This is included in the `metadata` field of the `Operation` returned by the
+// `GetOperation` call of the `google::longrunning::Operations` service.
+message BatchOperationMetadata {
+ // Enumerates the possible states that the batch request can be in.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is actively being processed.
+ PROCESSING = 1;
+
+ // The request is done and at least one item has been successfully
+ // processed.
+ SUCCESSFUL = 2;
+
+ // The request is done and no item has been successfully processed.
+ FAILED = 3;
+
+ // The request is done after the longrunning.Operations.CancelOperation has
+ // been called by the user. Any records that were processed before the
+ // cancel command are output as specified in the request.
+ CANCELLED = 4;
+ }
+
+ // The current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was submitted to the server.
+ google.protobuf.Timestamp submit_time = 2;
+
+ // The time when the batch request is finished and
+ // [google.longrunning.Operation.done][google.longrunning.Operation.done] is
+ // set to true.
+ google.protobuf.Timestamp end_time = 3;
+}
diff --git a/google/cloud/vision/v1/text_annotation.proto b/google/cloud/vision/v1/text_annotation.proto
new file mode 100644
index 000000000..417e4f514
--- /dev/null
+++ b/google/cloud/vision/v1/text_annotation.proto
@@ -0,0 +1,261 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1";
+option objc_class_prefix = "GCVN";
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have their own
+// properties. Properties describe detected languages, breaks, etc. Please refer
+// to the
+// [TextAnnotation.TextProperty][google.cloud.vision.v1.TextAnnotation.TextProperty]
+// message definition below for more detail.
+message TextAnnotation {
+ // Detected language for a structural component.
+ message DetectedLanguage {
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 1;
+
+ // Confidence of detected language. Range [0, 1].
+ float confidence = 2;
+ }
+
+ // Detected start or end of a structural component.
+ message DetectedBreak {
+ // Enum to denote the type of break found. New line, space etc.
+ enum BreakType {
+ // Unknown break label type.
+ UNKNOWN = 0;
+
+ // Regular space.
+ SPACE = 1;
+
+ // Sure space (very wide).
+ SURE_SPACE = 2;
+
+ // Line-wrapping break.
+ EOL_SURE_SPACE = 3;
+
+ // End-line hyphen that is not present in text; does not co-occur with
+ // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ HYPHEN = 4;
+
+ // Line break that ends a paragraph.
+ LINE_BREAK = 5;
+ }
+
+ // Detected break type.
+ BreakType type = 1;
+
+ // True if break prepends the element.
+ bool is_prefix = 2;
+ }
+
+ // Additional information detected on the structural component.
+ message TextProperty {
+ // A list of detected languages together with confidence.
+ repeated DetectedLanguage detected_languages = 1;
+
+ // Detected start or end of a text segment.
+ DetectedBreak detected_break = 2;
+ }
+
+ // List of pages detected by OCR.
+ repeated Page pages = 1;
+
+ // UTF-8 text detected on the pages.
+ string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+ // Additional information detected on the page.
+ TextAnnotation.TextProperty property = 1;
+
+ // Page width. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 width = 2;
+
+ // Page height. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 height = 3;
+
+ // List of blocks of text, images etc on this page.
+ repeated Block blocks = 4;
+
+ // Confidence of the OCR results on the page. Range [0, 1].
+ float confidence = 5;
+}
+
+// Logical element on the page.
+message Block {
+ // Type of a block (text, image etc) as identified by OCR.
+ enum BlockType {
+ // Unknown block type.
+ UNKNOWN = 0;
+
+ // Regular text block.
+ TEXT = 1;
+
+ // Table block.
+ TABLE = 2;
+
+ // Image block.
+ PICTURE = 3;
+
+ // Horizontal/vertical line box.
+ RULER = 4;
+
+ // Barcode block.
+ BARCODE = 5;
+ }
+
+ // Additional information detected for the block.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the block.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ //
+ // * when the text is horizontal it might look like:
+ //
+ // 0----1
+ // | |
+ // 3----2
+ //
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ //
+ // 2----3
+ // | |
+ // 1----0
+ //
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of paragraphs in this block (if this block is of type text).
+ repeated Paragraph paragraphs = 3;
+
+ // Detected block type (text, image etc) for this block.
+ BlockType block_type = 4;
+
+ // Confidence of the OCR results on the block. Range [0, 1].
+ float confidence = 5;
+}
+
+// Structural unit of text representing a number of words in certain order.
+message Paragraph {
+ // Additional information detected for the paragraph.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the paragraph.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of words in this paragraph.
+ repeated Word words = 3;
+
+ // Confidence of the OCR results for the paragraph. Range [0, 1].
+ float confidence = 4;
+}
+
+// A word representation.
+message Word {
+ // Additional information detected for the word.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the word.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of symbols in the word.
+ // The order of the symbols follows the natural reading order.
+ repeated Symbol symbols = 3;
+
+ // Confidence of the OCR results for the word. Range [0, 1].
+ float confidence = 4;
+}
+
+// A single symbol representation.
+message Symbol {
+ // Additional information detected for the symbol.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the symbol.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // The actual UTF-8 representation of the symbol.
+ string text = 3;
+
+ // Confidence of the OCR results for the symbol. Range [0, 1].
+ float confidence = 4;
+}
diff --git a/google/cloud/vision/v1/vision_gapic.yaml b/google/cloud/vision/v1/vision_gapic.yaml
new file mode 100644
index 000000000..29700d344
--- /dev/null
+++ b/google/cloud/vision/v1/vision_gapic.yaml
@@ -0,0 +1,457 @@
+type: com.google.api.codegen.ConfigProto
+config_schema_version: 1.0.0
+language_settings:
+ java:
+ package_name: com.google.cloud.vision.v1
+ release_level: GA
+ python:
+ package_name: google.cloud.vision_v1.gapic
+ go:
+ package_name: cloud.google.com/go/vision/apiv1
+ release_level: GA
+ csharp:
+ package_name: Google.Cloud.Vision.V1
+ release_level: GA
+ ruby:
+ package_name: Google::Cloud::Vision::V1
+ php:
+ package_name: Google\Cloud\Vision\V1
+ nodejs:
+ package_name: vision.v1
+ domain_layer_location: google-cloud
+interfaces:
+- name: google.cloud.vision.v1.ImageAnnotator
+ smoke_test:
+ method: BatchAnnotateImages
+ init_fields:
+ - requests[0].image.source.gcs_image_uri="gs://gapic-toolkit/President_Barack_Obama.jpg"
+ - requests[0].features[0].type=FACE_DETECTION
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes: []
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: BatchAnnotateImages
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: AsyncBatchAnnotateFiles
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ long_running:
+ return_type: google.cloud.vision.v1.AsyncBatchAnnotateFilesResponse
+ metadata_type: google.cloud.vision.v1.OperationMetadata
+ initial_poll_delay_millis: 20000
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 45000
+ total_poll_timeout_millis: 86400000
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+- name: google.cloud.vision.v1.ProductSearch
+ collections:
+ - name_pattern: projects/{project}/locations/{location}
+ entity_name: location
+ - name_pattern: projects/{project}/locations/{location}/productSets/{product_set}
+ entity_name: product_set
+ - name_pattern: projects/{project}/locations/{location}/products/{product}
+ entity_name: product
+ - name_pattern: projects/{project}/locations/{location}/products/{product}/referenceImages/{reference_image}
+ entity_name: reference_image
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes: []
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: CreateProduct
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - product
+ - product_id
+ required_fields:
+ - parent
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ parent: location
+ - name: ListProducts
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: products
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ parent: location
+ - name: GetProduct
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ name: product
+ - name: UpdateProduct
+ flattening:
+ groups:
+ - parameters:
+ - product
+ - update_mask
+ required_fields:
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ product.name: product
+ - name: DeleteProduct
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ name: product
+ - name: ListReferenceImages
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: reference_images
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ parent: product
+ - name: GetReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: reference_image
+ timeout_millis: 60000
+ - name: DeleteReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: reference_image
+ timeout_millis: 60000
+ - name: CreateReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - reference_image
+ - reference_image_id
+ required_fields:
+ - parent
+ - reference_image
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ parent: product
+ - name: CreateProductSet
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - product_set
+ - product_set_id
+ required_fields:
+ - parent
+ - product_set
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ parent: location
+ - name: ListProductSets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: product_sets
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ parent: location
+ - name: GetProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ name: product_set
+ - name: UpdateProductSet
+ flattening:
+ groups:
+ - parameters:
+ - product_set
+ - update_mask
+ required_fields:
+ - product_set
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ product_set.name: product_set
+ - name: DeleteProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ name: product_set
+ - name: AddProductToProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - product
+ required_fields:
+ - name
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ name: product_set
+ - name: RemoveProductFromProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - product
+ required_fields:
+ - name
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ name: product_set
+ - name: ListProductsInProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: products
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ name: product_set
+ - name: ImportProductSets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - input_config
+ required_fields:
+ - parent
+ - input_config
+ long_running:
+ return_type: google.cloud.vision.v1.ImportProductSetsResponse
+ metadata_type: google.cloud.vision.v1.BatchOperationMetadata
+ initial_poll_delay_millis: 20000
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 45000
+ total_poll_timeout_millis: 86400000
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ field_name_patterns:
+ parent: location
+# Force string formatting functions to be generated.
+enable_string_format_functions_override: true
+resource_name_generation:
+- message_name: CreateProductSetRequest
+ field_entity_map:
+ parent: location
+- message_name: ListProductSetsRequest
+ field_entity_map:
+ parent: location
+- message_name: GetProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: UpdateProductSetRequest
+ field_entity_map:
+ product_set.name: product_set
+- message_name: DeleteProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: CreateProductRequest
+ field_entity_map:
+ parent: location
+- message_name: ListProductsRequest
+ field_entity_map:
+ parent: location
+- message_name: GetProductRequest
+ field_entity_map:
+ name: product
+- message_name: UpdateProductRequest
+ field_entity_map:
+ product.name: product
+- message_name: DeleteProductRequest
+ field_entity_map:
+ name: product
+- message_name: CreateReferenceImageRequest
+ field_entity_map:
+ parent: product
+- message_name: DeleteReferenceImageRequest
+ field_entity_map:
+ name: reference_image
+- message_name: ListReferenceImagesRequest
+ field_entity_map:
+ parent: product
+- message_name: GetReferenceImageRequest
+ field_entity_map:
+ name: reference_image
+- message_name: AddProductToProductSetRequest
+ field_entity_map:
+ name: product_set
+ product: product
+- message_name: RemoveProductFromProductSetRequest
+ field_entity_map:
+ name: product_set
+ product: product
+- message_name: ListProductsInProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: ImportProductSetsRequest
+ field_entity_map:
+ parent: location
+- message_name: Product
+ field_entity_map:
+ name: product
+- message_name: ProductSet
+ field_entity_map:
+ name: product_set
+- message_name: ReferenceImage
+ field_entity_map:
+ name: reference_image
+- message_name: ProductSearchParams
+ field_entity_map:
+ product_set: product_set
diff --git a/google/cloud/vision/v1/web_detection.proto b/google/cloud/vision/v1/web_detection.proto
new file mode 100644
index 000000000..2d3c4a86a
--- /dev/null
+++ b/google/cloud/vision/v1/web_detection.proto
@@ -0,0 +1,107 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1";
+option objc_class_prefix = "GCVN";
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+ // Entity deduced from similar images on the Internet.
+ message WebEntity {
+ // Opaque entity ID.
+ string entity_id = 1;
+
+ // Overall relevancy score for the entity.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+
+ // Canonical description of the entity, in English.
+ string description = 3;
+ }
+
+ // Metadata for online images.
+ message WebImage {
+ // The result image URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the image.
+ float score = 2;
+ }
+
+ // Metadata for web pages.
+ message WebPage {
+ // The result web page URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the web page.
+ float score = 2;
+
+ // Title for the web page, may contain HTML markups.
+ string page_title = 3;
+
+ // Fully matching images on the page.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 4;
+
+ // Partial matching images on the page.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its
+ // crops.
+ repeated WebImage partial_matching_images = 5;
+ }
+
+ // Label to provide extra metadata for the web detection.
+ message WebLabel {
+ // Label for extra metadata.
+ string label = 1;
+
+ // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ // For more information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+ }
+
+ // Deduced entities from similar images on the Internet.
+ repeated WebEntity web_entities = 1;
+
+ // Fully matching images from the Internet.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 2;
+
+ // Partial matching images from the Internet.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its crops.
+ repeated WebImage partial_matching_images = 3;
+
+ // Web pages containing the matching images from the Internet.
+ repeated WebPage pages_with_matching_images = 4;
+
+ // The visually similar image results.
+ repeated WebImage visually_similar_images = 6;
+
+ // The service's best guess as to the topic of the request image.
+ // Inferred from similar images on the open web.
+ repeated WebLabel best_guess_labels = 8;
+}
diff --git a/google/cloud/vision/v1p1beta1/BUILD.bazel b/google/cloud/vision/v1p1beta1/BUILD.bazel
new file mode 100644
index 000000000..aa73427db
--- /dev/null
+++ b/google/cloud/vision/v1p1beta1/BUILD.bazel
@@ -0,0 +1,153 @@
+# This is an API workspace, having public visibility by default makes perfect sense.
+package(default_visibility = ["//visibility:public"])
+
+##############################################################################
+# Common
+##############################################################################
+load("@com_google_api_codegen//rules_gapic:gapic.bzl", "proto_library_with_info")
+
+proto_library(
+ name = "vision_proto",
+ srcs = [
+ "geometry.proto",
+ "image_annotator.proto",
+ "text_annotation.proto",
+ "web_detection.proto",
+ ],
+ deps = [
+ "//google/api:annotations_proto",
+ "//google/longrunning:operations_proto",
+ "//google/rpc:status_proto",
+ "//google/type:color_proto",
+ "//google/type:latlng_proto",
+ ],
+)
+
+proto_library_with_info(
+ name = "vision_proto_with_info",
+ deps = [":vision_proto"],
+)
+
+##############################################################################
+# Java
+##############################################################################
+load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library")
+load(
+ "@com_google_api_codegen//rules_gapic/java:java_gapic.bzl",
+ "java_gapic_library",
+ "java_resource_name_proto_library",
+)
+load("@com_google_api_codegen//rules_gapic/java:java_gapic_pkg.bzl", "java_gapic_assembly_gradle_pkg")
+
+_JAVA_GRPC_DEPS = [
+ "@com_google_api_grpc_proto_google_common_protos//jar",
+]
+
+java_proto_library(
+ name = "vision_java_proto",
+ deps = [":vision_proto"],
+)
+
+java_grpc_library(
+ name = "vision_java_grpc",
+ srcs = [":vision_proto"],
+ deps = [":vision_java_proto"] + _JAVA_GRPC_DEPS,
+)
+
+java_resource_name_proto_library(
+ name = "vision_resource_name_java_proto",
+ gapic_yaml = "vision_gapic.yaml",
+ deps = [":vision_proto"],
+)
+
+java_gapic_library(
+ name = "vision_java_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ service_yaml = "//google/cloud/vision:vision_v1p1beta1.yaml",
+ test_deps = [":vision_java_grpc"],
+ deps = [
+ ":vision_java_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+)
+
+[java_test(
+ name = test_name,
+ test_class = test_name,
+ runtime_deps = [
+ ":vision_java_gapic_test",
+ ],
+) for test_name in [
+ "com.google.cloud.vision.v1p1beta1.ImageAnnotatorClientTest",
+]]
+
+# Opensource Packages
+java_gapic_assembly_gradle_pkg(
+ name = "google-cloud-vision-v1p1beta1-java",
+ client_deps = [":vision_java_gapic"],
+ client_group = "com.google.cloud",
+ client_test_deps = [":vision_java_gapic_test"],
+ grpc_deps = [":vision_java_grpc"],
+ grpc_group = "com.google.api.grpc",
+ proto_deps = [
+ ":vision_java_proto",
+ ":vision_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+ version = "0.0.0-SNAPSHOT",
+)
+
+##############################################################################
+# Go
+##############################################################################
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic.bzl", "go_gapic_srcjar", "go_gapic_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic_pkg.bzl", "go_gapic_assembly_pkg")
+
+go_proto_library(
+ name = "vision_go_proto",
+ compilers = ["@io_bazel_rules_go//proto:go_grpc"],
+ importpath = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1",
+ protos = [":vision_proto_with_info"],
+ deps = [
+ "//google/api:annotations_go_proto",
+ "//google/longrunning:longrunning_go_proto",
+ "//google/rpc:status_go_proto",
+ "//google/type:color_go_proto",
+ "//google/type:latlng_go_proto",
+ ],
+)
+
+go_gapic_library(
+ name = "vision_go_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ importpath = "cloud.google.com/go/vision/apiv1p1beta1",
+ service_yaml = "//google/cloud/vision:vision_v1p1beta1.yaml",
+ deps = [
+ ":vision_go_proto",
+ "//google/longrunning:longrunning_go_gapic",
+ "//google/longrunning:longrunning_go_proto",
+ "@com_google_cloud_go//longrunning:go_default_library",
+ ],
+)
+
+go_test(
+ name = "vision_go_gapic_test",
+ srcs = [":vision_go_gapic_srcjar_test"],
+ embed = [":vision_go_gapic"],
+ importpath = "cloud.google.com/go/vision/apiv1p1beta1",
+)
+
+# Opensource Packages
+go_gapic_assembly_pkg(
+ name = "gapi-cloud-vision-v1p1beta1-go",
+ deps = [
+ ":vision_go_gapic",
+ ":vision_go_gapic_srcjar-smoke-test.srcjar",
+ ":vision_go_gapic_srcjar-test.srcjar",
+ ":vision_go_proto",
+ ],
+)
diff --git a/google/cloud/vision/v1p1beta1/geometry.proto b/google/cloud/vision/v1p1beta1/geometry.proto
new file mode 100644
index 000000000..6d46d9c34
--- /dev/null
+++ b/google/cloud/vision/v1p1beta1/geometry.proto
@@ -0,0 +1,53 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p1beta1;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "GeometryProto";
+option java_package = "com.google.cloud.vision.v1p1beta1";
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A bounding polygon for the detected image annotation.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+}
+
+// A 3D position in the image, used primarily for Face detection landmarks.
+// A valid Position must have both x and y coordinates.
+// The position coordinates are in the same scale as the original image.
+message Position {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+
+ // Z coordinate (or depth).
+ float z = 3;
+}
diff --git a/google/cloud/vision/v1p1beta1/image_annotator.proto b/google/cloud/vision/v1p1beta1/image_annotator.proto
new file mode 100644
index 000000000..4869a3311
--- /dev/null
+++ b/google/cloud/vision/v1p1beta1/image_annotator.proto
@@ -0,0 +1,592 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p1beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p1beta1/geometry.proto";
+import "google/cloud/vision/v1p1beta1/text_annotation.proto";
+import "google/cloud/vision/v1p1beta1/web_detection.proto";
+import "google/rpc/status.proto";
+import "google/type/color.proto";
+import "google/type/latlng.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ImageAnnotatorProto";
+option java_package = "com.google.cloud.vision.v1p1beta1";
+
+// Service that performs Google Cloud Vision API detection tasks over client
+// images, such as face, landmark, logo, label, and text detection. The
+// ImageAnnotator service returns detected entities from the images.
+service ImageAnnotator {
+ // Run image detection and annotation for a batch of images.
+ rpc BatchAnnotateImages(BatchAnnotateImagesRequest)
+ returns (BatchAnnotateImagesResponse) {
+ option (google.api.http) = {
+ post: "/v1p1beta1/images:annotate"
+ body: "*"
+ };
+ }
+}
+
+// Users describe the type of Google Cloud Vision API tasks to perform over
+// images by using *Feature*s. Each Feature indicates a type of image
+// detection task to perform. Features encode the Cloud Vision API
+// vertical to operate on and the number of top-scoring results to return.
+message Feature {
+ // Type of image feature.
+ enum Type {
+ // Unspecified feature type.
+ TYPE_UNSPECIFIED = 0;
+
+ // Run face detection.
+ FACE_DETECTION = 1;
+
+ // Run landmark detection.
+ LANDMARK_DETECTION = 2;
+
+ // Run logo detection.
+ LOGO_DETECTION = 3;
+
+ // Run label detection.
+ LABEL_DETECTION = 4;
+
+ // Run OCR.
+ TEXT_DETECTION = 5;
+
+ // Run dense text document OCR. Takes precedence when both
+ // DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present.
+ DOCUMENT_TEXT_DETECTION = 11;
+
+ // Run computer vision models to compute image safe-search properties.
+ SAFE_SEARCH_DETECTION = 6;
+
+ // Compute a set of image properties, such as the image's dominant colors.
+ IMAGE_PROPERTIES = 7;
+
+ // Run crop hints.
+ CROP_HINTS = 9;
+
+ // Run web detection.
+ WEB_DETECTION = 10;
+ }
+
+ // The feature type.
+ Type type = 1;
+
+ // Maximum number of results of this type.
+ int32 max_results = 2;
+
+ // Model to use for the feature.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 3;
+}
+
+// External image source (Google Cloud Storage image location).
+message ImageSource {
+ // NOTE: For new code `image_uri` below is preferred.
+ // Google Cloud Storage image URI, which must be in the following form:
+ // `gs://bucket_name/object_name` (for details, see
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris)).
+ // NOTE: Cloud Storage object versioning is not supported.
+ string gcs_image_uri = 1;
+
+ // Image URI which supports:
+ // 1) Google Cloud Storage image URI, which must be in the following form:
+ // `gs://bucket_name/object_name` (for details, see
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris)).
+ // NOTE: Cloud Storage object versioning is not supported.
+ // 2) Publicly accessible image HTTP/HTTPS URL.
+ // This is preferred over the legacy `gcs_image_uri` above. When both
+ // `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ // precedence.
+ string image_uri = 2;
+}
+
+// Client image to perform Google Cloud Vision API tasks over.
+message Image {
+ // Image content, represented as a stream of bytes.
+ // Note: as with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // Google Cloud Storage image location. If both `content` and `source`
+ // are provided for an image, `content` takes precedence and is
+ // used to perform the image annotation request.
+ ImageSource source = 2;
+}
+
+// A face annotation object contains the results of face detection.
+message FaceAnnotation {
+ // A face-specific landmark (for example, a face feature).
+ message Landmark {
+ // Face landmark (feature) type.
+ // Left and right are defined from the vantage of the viewer of the image
+ // without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ // typically, is the person's right eye.
+ enum Type {
+ // Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0;
+
+ // Left eye.
+ LEFT_EYE = 1;
+
+ // Right eye.
+ RIGHT_EYE = 2;
+
+ // Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3;
+
+ // Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4;
+
+ // Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5;
+
+ // Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6;
+
+ // Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7;
+
+ // Nose tip.
+ NOSE_TIP = 8;
+
+ // Upper lip.
+ UPPER_LIP = 9;
+
+ // Lower lip.
+ LOWER_LIP = 10;
+
+ // Mouth left.
+ MOUTH_LEFT = 11;
+
+ // Mouth right.
+ MOUTH_RIGHT = 12;
+
+ // Mouth center.
+ MOUTH_CENTER = 13;
+
+ // Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14;
+
+ // Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15;
+
+ // Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16;
+
+ // Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17;
+
+ // Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18;
+
+ // Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19;
+
+ // Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20;
+
+ // Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21;
+
+ // Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22;
+
+ // Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23;
+
+ // Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24;
+
+ // Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25;
+
+ // Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26;
+
+ // Left ear tragion.
+ LEFT_EAR_TRAGION = 27;
+
+ // Right ear tragion.
+ RIGHT_EAR_TRAGION = 28;
+
+ // Left eye pupil.
+ LEFT_EYE_PUPIL = 29;
+
+ // Right eye pupil.
+ RIGHT_EYE_PUPIL = 30;
+
+ // Forehead glabella.
+ FOREHEAD_GLABELLA = 31;
+
+ // Chin gnathion.
+ CHIN_GNATHION = 32;
+
+ // Chin left gonion.
+ CHIN_LEFT_GONION = 33;
+
+ // Chin right gonion.
+ CHIN_RIGHT_GONION = 34;
+ }
+
+ // Face landmark type.
+ Type type = 3;
+
+ // Face landmark position.
+ Position position = 4;
+ }
+
+ // The bounding polygon around the face. The coordinates of the bounding box
+ // are in the original image's scale, as returned in `ImageParams`.
+ // The bounding box is computed to "frame" the face in accordance with human
+ // expectations. It is based on the landmarker results.
+ // Note that one or more x and/or y coordinates may not be generated in the
+ // `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ // appears in the image to be annotated.
+ BoundingPoly bounding_poly = 1;
+
+ // The `fd_bounding_poly` bounding polygon is tighter than the
+ // `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ // is used to eliminate the face from any image analysis that detects the
+ // "amount of skin" visible in an image. It is not based on the
+ // landmarker results, only on the initial face detection, hence
+ // the <code>fd</code> (face detection) prefix.
+ BoundingPoly fd_bounding_poly = 2;
+
+ // Detected face landmarks.
+ repeated Landmark landmarks = 3;
+
+ // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ // of the face relative to the image vertical about the axis perpendicular to
+ // the face. Range [-180,180].
+ float roll_angle = 4;
+
+ // Yaw angle, which indicates the leftward/rightward angle that the face is
+ // pointing relative to the vertical plane perpendicular to the image. Range
+ // [-180,180].
+ float pan_angle = 5;
+
+ // Pitch angle, which indicates the upwards/downwards angle that the face is
+ // pointing relative to the image's horizontal plane. Range [-180,180].
+ float tilt_angle = 6;
+
+ // Detection confidence. Range [0, 1].
+ float detection_confidence = 7;
+
+ // Face landmarking confidence. Range [0, 1].
+ float landmarking_confidence = 8;
+
+ // Joy likelihood.
+ Likelihood joy_likelihood = 9;
+
+ // Sorrow likelihood.
+ Likelihood sorrow_likelihood = 10;
+
+ // Anger likelihood.
+ Likelihood anger_likelihood = 11;
+
+ // Surprise likelihood.
+ Likelihood surprise_likelihood = 12;
+
+ // Under-exposed likelihood.
+ Likelihood under_exposed_likelihood = 13;
+
+ // Blurred likelihood.
+ Likelihood blurred_likelihood = 14;
+
+ // Headwear likelihood.
+ Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+ // lat/long location coordinates.
+ google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+ // Name of the property.
+ string name = 1;
+
+ // Value of the property.
+ string value = 2;
+
+ // Value of numeric properties.
+ uint64 uint64_value = 3;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ string mid = 1;
+
+ // The language code for the locale in which the entity textual
+ // `description` is expressed.
+ string locale = 2;
+
+ // Entity textual description, expressed in its `locale` language.
+ string description = 3;
+
+ // Overall score of the result. Range [0, 1].
+ float score = 4;
+
+ // The accuracy of the entity detection in an image.
+ // For example, for an image in which the "Eiffel Tower" entity is detected,
+ // this field represents the confidence that there is a tower in the query
+ // image. Range [0, 1].
+ float confidence = 5;
+
+ // The relevancy of the ICA (Image Content Annotation) label to the
+ // image. For example, the relevancy of "tower" is likely higher to an image
+ // containing the detected "Eiffel Tower" than to an image containing a
+ // detected distant towering building, even though the confidence that
+ // there is a tower in each image may be the same. Range [0, 1].
+ float topicality = 6;
+
+ // Image region to which this entity belongs. Not produced
+ // for `LABEL_DETECTION` features.
+ BoundingPoly bounding_poly = 7;
+
+ // The location information for the detected entity. Multiple
+ // `LocationInfo` elements can be present because one location may
+ // indicate the location of the scene in the image, and another location
+ // may indicate the location of the place where the image was taken.
+ // Location information is usually present for landmarks.
+ repeated LocationInfo locations = 8;
+
+ // Some entities may have optional user-supplied `Property` (name/value)
+ // fields, such a score or string that qualifies the entity.
+ repeated Property properties = 9;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+ // Represents the adult content likelihood for the image. Adult content may
+ // contain elements such as nudity, pornographic images or cartoons, or
+ // sexual activities.
+ Likelihood adult = 1;
+
+ // Spoof likelihood. The likelihood that an modification
+ // was made to the image's canonical version to make it appear
+ // funny or offensive.
+ Likelihood spoof = 2;
+
+ // Likelihood that this is a medical image.
+ Likelihood medical = 3;
+
+ // Likelihood that this image contains violent content.
+ Likelihood violence = 4;
+
+ // Likelihood that the request image contains racy content. Racy content may
+ // include (but is not limited to) skimpy or sheer clothing, strategically
+ // covered nudity, lewd or provocative poses, or close-ups of sensitive
+ // body areas.
+ Likelihood racy = 9;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+ // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1;
+
+ // Max lat/long pair.
+ google.type.LatLng max_lat_lng = 2;
+}
+
+// Color information consists of RGB channels, score, and the fraction of
+// the image that the color occupies in the image.
+message ColorInfo {
+ // RGB components of the color.
+ google.type.Color color = 1;
+
+ // Image-specific score for this color. Value in range [0, 1].
+ float score = 2;
+
+ // The fraction of pixels the color occupies in the image.
+ // Value in range [0, 1].
+ float pixel_fraction = 3;
+}
+
+// Set of dominant colors and their corresponding scores.
+message DominantColorsAnnotation {
+ // RGB color values with their score and pixel fraction.
+ repeated ColorInfo colors = 1;
+}
+
+// Stores image properties, such as dominant colors.
+message ImageProperties {
+ // If present, dominant colors completed successfully.
+ DominantColorsAnnotation dominant_colors = 1;
+}
+
+// Single crop hint that is used to generate a new crop when serving an image.
+message CropHint {
+ // The bounding polygon for the crop region. The coordinates of the bounding
+ // box are in the original image's scale, as returned in `ImageParams`.
+ BoundingPoly bounding_poly = 1;
+
+ // Confidence of this being a salient region. Range [0, 1].
+ float confidence = 2;
+
+ // Fraction of importance of this salient region with respect to the original
+ // image.
+ float importance_fraction = 3;
+}
+
+// Set of crop hints that are used to generate new crops when serving images.
+message CropHintsAnnotation {
+ // Crop hint results.
+ repeated CropHint crop_hints = 1;
+}
+
+// Parameters for crop hints annotation request.
+message CropHintsParams {
+ // Aspect ratios in floats, representing the ratio of the width to the height
+ // of the image. For example, if the desired aspect ratio is 4/3, the
+ // corresponding float value should be 1.33333. If not specified, the
+ // best possible crop is returned. The number of provided aspect ratios is
+ // limited to a maximum of 16; any aspect ratios provided after the 16th are
+ // ignored.
+ repeated float aspect_ratios = 1;
+}
+
+// Parameters for web detection request.
+message WebDetectionParams {
+ // Whether to include results derived from the geo information in the image.
+ bool include_geo_results = 2;
+}
+
+// Image context and/or feature-specific parameters.
+message ImageContext {
+ // lat/long rectangle that specifies the location of the image.
+ LatLongRect lat_long_rect = 1;
+
+ // List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ // yields the best results since it enables automatic language detection. For
+ // languages based on the Latin alphabet, setting `language_hints` is not
+ // needed. In rare cases, when the language of the text in the image is known,
+ // setting a hint will help get better results (although it will be a
+ // significant hindrance if the hint is wrong). Text detection returns an
+ // error if one or more of the specified languages is not one of the
+ // [supported languages](/vision/docs/languages).
+ repeated string language_hints = 2;
+
+ // Parameters for crop hints annotation request.
+ CropHintsParams crop_hints_params = 4;
+
+ // Parameters for web detection.
+ WebDetectionParams web_detection_params = 6;
+}
+
+// Request for performing Google Cloud Vision API tasks over a user-provided
+// image, with user-requested features.
+message AnnotateImageRequest {
+ // The image to be processed.
+ Image image = 1;
+
+ // Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image.
+ ImageContext image_context = 3;
+}
+
+// Response to an image annotation request.
+message AnnotateImageResponse {
+ // If present, face detection has completed successfully.
+ repeated FaceAnnotation face_annotations = 1;
+
+ // If present, landmark detection has completed successfully.
+ repeated EntityAnnotation landmark_annotations = 2;
+
+ // If present, logo detection has completed successfully.
+ repeated EntityAnnotation logo_annotations = 3;
+
+ // If present, label detection has completed successfully.
+ repeated EntityAnnotation label_annotations = 4;
+
+ // If present, text (OCR) detection has completed successfully.
+ repeated EntityAnnotation text_annotations = 5;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ // This annotation provides the structural hierarchy for the OCR detected
+ // text.
+ TextAnnotation full_text_annotation = 12;
+
+ // If present, safe-search annotation has completed successfully.
+ SafeSearchAnnotation safe_search_annotation = 6;
+
+ // If present, image properties were extracted successfully.
+ ImageProperties image_properties_annotation = 8;
+
+ // If present, crop hints have completed successfully.
+ CropHintsAnnotation crop_hints_annotation = 11;
+
+ // If present, web detection has completed successfully.
+ WebDetection web_detection = 13;
+
+ // If set, represents the error message for the operation.
+ // Note that filled-in image annotations are guaranteed to be
+ // correct, even when `error` is set.
+ google.rpc.Status error = 9;
+}
+
+// Multiple image annotation requests are batched into a single service call.
+message BatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+}
+
+// Response to a batch image annotation request.
+message BatchAnnotateImagesResponse {
+ // Individual responses to image annotation requests within the batch.
+ repeated AnnotateImageResponse responses = 1;
+}
+
+// A bucketized representation of likelihood, which is intended to give clients
+// highly stable results across model upgrades.
+enum Likelihood {
+ // Unknown likelihood.
+ UNKNOWN = 0;
+
+ // It is very unlikely that the image belongs to the specified vertical.
+ VERY_UNLIKELY = 1;
+
+ // It is unlikely that the image belongs to the specified vertical.
+ UNLIKELY = 2;
+
+ // It is possible that the image belongs to the specified vertical.
+ POSSIBLE = 3;
+
+ // It is likely that the image belongs to the specified vertical.
+ LIKELY = 4;
+
+ // It is very likely that the image belongs to the specified vertical.
+ VERY_LIKELY = 5;
+}
diff --git a/google/cloud/vision/v1p1beta1/text_annotation.proto b/google/cloud/vision/v1p1beta1/text_annotation.proto
new file mode 100644
index 000000000..928e6e88b
--- /dev/null
+++ b/google/cloud/vision/v1p1beta1/text_annotation.proto
@@ -0,0 +1,252 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p1beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p1beta1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1p1beta1";
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have their own
+// properties. Properties describe detected languages, breaks, etc. Please refer
+// to the
+// [TextAnnotation.TextProperty][google.cloud.vision.v1p1beta1.TextAnnotation.TextProperty]
+// message definition below for more detail.
+message TextAnnotation {
+ // Detected language for a structural component.
+ message DetectedLanguage {
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 1;
+
+ // Confidence of detected language. Range [0, 1].
+ float confidence = 2;
+ }
+
+ // Detected start or end of a structural component.
+ message DetectedBreak {
+ // Enum to denote the type of break found. New line, space etc.
+ enum BreakType {
+ // Unknown break label type.
+ UNKNOWN = 0;
+
+ // Regular space.
+ SPACE = 1;
+
+ // Sure space (very wide).
+ SURE_SPACE = 2;
+
+ // Line-wrapping break.
+ EOL_SURE_SPACE = 3;
+
+ // End-line hyphen that is not present in text; does not co-occur with
+ // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ HYPHEN = 4;
+
+ // Line break that ends a paragraph.
+ LINE_BREAK = 5;
+ }
+
+ // Detected break type.
+ BreakType type = 1;
+
+ // True if break prepends the element.
+ bool is_prefix = 2;
+ }
+
+ // Additional information detected on the structural component.
+ message TextProperty {
+ // A list of detected languages together with confidence.
+ repeated DetectedLanguage detected_languages = 1;
+
+ // Detected start or end of a text segment.
+ DetectedBreak detected_break = 2;
+ }
+
+ // List of pages detected by OCR.
+ repeated Page pages = 1;
+
+ // UTF-8 text detected on the pages.
+ string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+ // Additional information detected on the page.
+ TextAnnotation.TextProperty property = 1;
+
+ // Page width in pixels.
+ int32 width = 2;
+
+ // Page height in pixels.
+ int32 height = 3;
+
+ // List of blocks of text, images etc on this page.
+ repeated Block blocks = 4;
+
+ // Confidence of the OCR results on the page. Range [0, 1].
+ float confidence = 5;
+}
+
+// Logical element on the page.
+message Block {
+ // Type of a block (text, image etc) as identified by OCR.
+ enum BlockType {
+ // Unknown block type.
+ UNKNOWN = 0;
+
+ // Regular text block.
+ TEXT = 1;
+
+ // Table block.
+ TABLE = 2;
+
+ // Image block.
+ PICTURE = 3;
+
+ // Horizontal/vertical line box.
+ RULER = 4;
+
+ // Barcode block.
+ BARCODE = 5;
+ }
+
+ // Additional information detected for the block.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the block.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+  // List of paragraphs in this block (if this block is of type text).
+ repeated Paragraph paragraphs = 3;
+
+ // Detected block type (text, image etc) for this block.
+ BlockType block_type = 4;
+
+ // Confidence of the OCR results on the block. Range [0, 1].
+ float confidence = 5;
+}
+
+// Structural unit of text representing a number of words in certain order.
+message Paragraph {
+ // Additional information detected for the paragraph.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the paragraph.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of words in this paragraph.
+ repeated Word words = 3;
+
+ // Confidence of the OCR results for the paragraph. Range [0, 1].
+ float confidence = 4;
+}
+
+// A word representation.
+message Word {
+ // Additional information detected for the word.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the word.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of symbols in the word.
+ // The order of the symbols follows the natural reading order.
+ repeated Symbol symbols = 3;
+
+ // Confidence of the OCR results for the word. Range [0, 1].
+ float confidence = 4;
+}
+
+// A single symbol representation.
+message Symbol {
+ // Additional information detected for the symbol.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the symbol.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // The actual UTF-8 representation of the symbol.
+ string text = 3;
+
+ // Confidence of the OCR results for the symbol. Range [0, 1].
+ float confidence = 4;
+}
diff --git a/google/cloud/vision/v1p1beta1/vision_gapic.yaml b/google/cloud/vision/v1p1beta1/vision_gapic.yaml
new file mode 100644
index 000000000..07b9bd9d4
--- /dev/null
+++ b/google/cloud/vision/v1p1beta1/vision_gapic.yaml
@@ -0,0 +1,55 @@
+type: com.google.api.codegen.ConfigProto
+config_schema_version: 1.0.0
+language_settings:
+ java:
+ package_name: com.google.cloud.vision.v1p1beta1
+ python:
+ package_name: google.cloud.vision_v1p1beta1.gapic
+ go:
+ package_name: cloud.google.com/go/vision/apiv1p1beta1
+ release_level: BETA
+ csharp:
+ package_name: Google.Cloud.Vision.V1P1Beta1
+ release_level: BETA
+ ruby:
+ package_name: Google::Cloud::Vision::V1p1beta1
+ php:
+ package_name: Google\Cloud\Vision\V1p1beta1
+ nodejs:
+ package_name: vision.v1p1beta1
+ domain_layer_location: google-cloud
+interfaces:
+- name: google.cloud.vision.v1p1beta1.ImageAnnotator
+ smoke_test:
+ method: BatchAnnotateImages
+ init_fields:
+ - requests[0].image.source.gcs_image_uri="gs://gapic-toolkit/President_Barack_Obama.jpg"
+ - requests[0].features[0].type=FACE_DETECTION
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes: []
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: BatchAnnotateImages
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
diff --git a/google/cloud/vision/v1p1beta1/web_detection.proto b/google/cloud/vision/v1p1beta1/web_detection.proto
new file mode 100644
index 000000000..28249cbde
--- /dev/null
+++ b/google/cloud/vision/v1p1beta1/web_detection.proto
@@ -0,0 +1,104 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p1beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1p1beta1";
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+ // Entity deduced from similar images on the Internet.
+ message WebEntity {
+ // Opaque entity ID.
+ string entity_id = 1;
+
+ // Overall relevancy score for the entity.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+
+ // Canonical description of the entity, in English.
+ string description = 3;
+ }
+
+ // Metadata for online images.
+ message WebImage {
+ // The result image URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the image.
+ float score = 2;
+ }
+
+ // Metadata for web pages.
+ message WebPage {
+ // The result web page URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the web page.
+ float score = 2;
+
+ // Title for the web page, may contain HTML markups.
+ string page_title = 3;
+
+ // Fully matching images on the page.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 4;
+
+ // Partial matching images on the page.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its
+ // crops.
+ repeated WebImage partial_matching_images = 5;
+ }
+
+ // Label to provide extra metadata for the web detection.
+ message WebLabel {
+ // Label for extra metadata.
+ string label = 1;
+
+ // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ // For more information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+ }
+
+ // Deduced entities from similar images on the Internet.
+ repeated WebEntity web_entities = 1;
+
+ // Fully matching images from the Internet.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 2;
+
+ // Partial matching images from the Internet.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its crops.
+ repeated WebImage partial_matching_images = 3;
+
+ // Web pages containing the matching images from the Internet.
+ repeated WebPage pages_with_matching_images = 4;
+
+ // The visually similar image results.
+ repeated WebImage visually_similar_images = 6;
+
+ // Best guess text labels for the request image.
+ repeated WebLabel best_guess_labels = 8;
+}
diff --git a/google/cloud/vision/v1p2beta1/BUILD.bazel b/google/cloud/vision/v1p2beta1/BUILD.bazel
new file mode 100644
index 000000000..87336bc96
--- /dev/null
+++ b/google/cloud/vision/v1p2beta1/BUILD.bazel
@@ -0,0 +1,154 @@
+# This is an API workspace, having public visibility by default makes perfect sense.
+package(default_visibility = ["//visibility:public"])
+
+##############################################################################
+# Common
+##############################################################################
+load("@com_google_api_codegen//rules_gapic:gapic.bzl", "proto_library_with_info")
+
+proto_library(
+ name = "vision_proto",
+ srcs = [
+ "geometry.proto",
+ "image_annotator.proto",
+ "text_annotation.proto",
+ "web_detection.proto",
+ ],
+ deps = [
+ "//google/api:annotations_proto",
+ "//google/longrunning:operations_proto",
+ "//google/rpc:status_proto",
+ "//google/type:color_proto",
+ "//google/type:latlng_proto",
+ "@com_google_protobuf//:timestamp_proto",
+ ],
+)
+
+proto_library_with_info(
+ name = "vision_proto_with_info",
+ deps = [":vision_proto"],
+)
+
+##############################################################################
+# Java
+##############################################################################
+load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library")
+load(
+ "@com_google_api_codegen//rules_gapic/java:java_gapic.bzl",
+ "java_gapic_library",
+ "java_resource_name_proto_library",
+)
+load("@com_google_api_codegen//rules_gapic/java:java_gapic_pkg.bzl", "java_gapic_assembly_gradle_pkg")
+
+_JAVA_GRPC_DEPS = [
+ "@com_google_api_grpc_proto_google_common_protos//jar",
+]
+
+java_proto_library(
+ name = "vision_java_proto",
+ deps = [":vision_proto"],
+)
+
+java_grpc_library(
+ name = "vision_java_grpc",
+ srcs = [":vision_proto"],
+ deps = [":vision_java_proto"] + _JAVA_GRPC_DEPS,
+)
+
+java_resource_name_proto_library(
+ name = "vision_resource_name_java_proto",
+ gapic_yaml = "vision_gapic.yaml",
+ deps = [":vision_proto"],
+)
+
+java_gapic_library(
+ name = "vision_java_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ service_yaml = "//google/cloud/vision:vision_v1p2beta1.yaml",
+ test_deps = [":vision_java_grpc"],
+ deps = [
+ ":vision_java_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+)
+
+[java_test(
+ name = test_name,
+ test_class = test_name,
+ runtime_deps = [
+ ":vision_java_gapic_test",
+ ],
+) for test_name in [
+ "com.google.cloud.vision.v1p2beta1.ImageAnnotatorClientTest",
+]]
+
+# Opensource Packages
+java_gapic_assembly_gradle_pkg(
+ name = "google-cloud-vision-v1p2beta1-java",
+ client_deps = [":vision_java_gapic"],
+ client_group = "com.google.cloud",
+ client_test_deps = [":vision_java_gapic_test"],
+ grpc_deps = [":vision_java_grpc"],
+ grpc_group = "com.google.api.grpc",
+ proto_deps = [
+ ":vision_java_proto",
+ ":vision_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+ version = "0.0.0-SNAPSHOT",
+)
+
+##############################################################################
+# Go
+##############################################################################
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic.bzl", "go_gapic_srcjar", "go_gapic_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic_pkg.bzl", "go_gapic_assembly_pkg")
+
+go_proto_library(
+ name = "vision_go_proto",
+ compilers = ["@io_bazel_rules_go//proto:go_grpc"],
+ importpath = "google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1",
+ protos = [":vision_proto_with_info"],
+ deps = [
+ "//google/api:annotations_go_proto",
+ "//google/longrunning:longrunning_go_proto",
+ "//google/rpc:status_go_proto",
+ "//google/type:color_go_proto",
+ "//google/type:latlng_go_proto",
+ ],
+)
+
+go_gapic_library(
+ name = "vision_go_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ importpath = "cloud.google.com/go/vision/apiv1p2beta1",
+ service_yaml = "//google/cloud/vision:vision_v1p2beta1.yaml",
+ deps = [
+ ":vision_go_proto",
+ "//google/longrunning:longrunning_go_gapic",
+ "//google/longrunning:longrunning_go_proto",
+ "@com_google_cloud_go//longrunning:go_default_library",
+ ],
+)
+
+go_test(
+ name = "vision_go_gapic_test",
+ srcs = [":vision_go_gapic_srcjar_test"],
+ embed = [":vision_go_gapic"],
+ importpath = "cloud.google.com/go/vision/apiv1p2beta1",
+)
+
+# Opensource Packages
+go_gapic_assembly_pkg(
+ name = "gapi-cloud-vision-v1p2beta1-go",
+ deps = [
+ ":vision_go_gapic",
+ ":vision_go_gapic_srcjar-smoke-test.srcjar",
+ ":vision_go_gapic_srcjar-test.srcjar",
+ ":vision_go_proto",
+ ],
+) \ No newline at end of file
diff --git a/google/cloud/vision/v1p2beta1/geometry.proto b/google/cloud/vision/v1p2beta1/geometry.proto
new file mode 100644
index 000000000..e9fec20bd
--- /dev/null
+++ b/google/cloud/vision/v1p2beta1/geometry.proto
@@ -0,0 +1,67 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p2beta1;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "GeometryProto";
+option java_package = "com.google.cloud.vision.v1p2beta1";
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
+
+// A bounding polygon for the detected image annotation.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+
+ // The bounding polygon normalized vertices.
+ repeated NormalizedVertex normalized_vertices = 2;
+}
+
+// A 3D position in the image, used primarily for Face detection landmarks.
+// A valid Position must have both x and y coordinates.
+// The position coordinates are in the same scale as the original image.
+message Position {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+
+ // Z coordinate (or depth).
+ float z = 3;
+}
diff --git a/google/cloud/vision/v1p2beta1/image_annotator.proto b/google/cloud/vision/v1p2beta1/image_annotator.proto
new file mode 100644
index 000000000..c5fce50f9
--- /dev/null
+++ b/google/cloud/vision/v1p2beta1/image_annotator.proto
@@ -0,0 +1,765 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p2beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p2beta1/geometry.proto";
+import "google/cloud/vision/v1p2beta1/text_annotation.proto";
+import "google/cloud/vision/v1p2beta1/web_detection.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/type/color.proto";
+import "google/type/latlng.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ImageAnnotatorProto";
+option java_package = "com.google.cloud.vision.v1p2beta1";
+
+// Service that performs Google Cloud Vision API detection tasks over client
+// images, such as face, landmark, logo, label, and text detection. The
+// ImageAnnotator service returns detected entities from the images.
+service ImageAnnotator {
+ // Run image detection and annotation for a batch of images.
+ rpc BatchAnnotateImages(BatchAnnotateImagesRequest)
+ returns (BatchAnnotateImagesResponse) {
+ option (google.api.http) = {
+ post: "/v1p2beta1/images:annotate"
+ body: "*"
+ };
+ }
+
+ // Run async image detection and annotation for a list of generic files (e.g.
+ // PDF) which may contain multiple pages and multiple images per page.
+ // Progress and results can be retrieved through the
+ // `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `OperationMetadata` (metadata).
+ // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
+ rpc AsyncBatchAnnotateFiles(AsyncBatchAnnotateFilesRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p2beta1/files:asyncBatchAnnotate"
+ body: "*"
+ };
+ }
+}
+
+// The type of Google Cloud Vision API detection to perform, and the maximum
+// number of results to return for that type. Multiple `Feature` objects can
+// be specified in the `features` list.
+message Feature {
+ // Type of Google Cloud Vision API feature to be extracted.
+ enum Type {
+ // Unspecified feature type.
+ TYPE_UNSPECIFIED = 0;
+
+ // Run face detection.
+ FACE_DETECTION = 1;
+
+ // Run landmark detection.
+ LANDMARK_DETECTION = 2;
+
+ // Run logo detection.
+ LOGO_DETECTION = 3;
+
+ // Run label detection.
+ LABEL_DETECTION = 4;
+
+ // Run text detection / optical character recognition (OCR). Text detection
+ // is optimized for areas of text within a larger image; if the image is
+ // a document, use `DOCUMENT_TEXT_DETECTION` instead.
+ TEXT_DETECTION = 5;
+
+ // Run dense text document OCR. Takes precedence when both
+ // `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
+ DOCUMENT_TEXT_DETECTION = 11;
+
+ // Run Safe Search to detect potentially unsafe
+ // or undesirable content.
+ SAFE_SEARCH_DETECTION = 6;
+
+ // Compute a set of image properties, such as the
+ // image's dominant colors.
+ IMAGE_PROPERTIES = 7;
+
+ // Run crop hints.
+ CROP_HINTS = 9;
+
+ // Run web detection.
+ WEB_DETECTION = 10;
+ }
+
+ // The feature type.
+ Type type = 1;
+
+ // Maximum number of results of this type. Does not apply to
+ // `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ int32 max_results = 2;
+
+ // Model to use for the feature.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 3;
+}
+
+// External image source (Google Cloud Storage or web URL image location).
+message ImageSource {
+ // **Use `image_uri` instead.**
+ //
+ // The Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
+ string gcs_image_uri = 1;
+
+ // The URI of the source image. Can be either:
+ //
+ // 1. A Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more
+ // info.
+ //
+ // 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
+ // HTTP/HTTPS URLs, Google cannot guarantee that the request will be
+ // completed. Your request may fail if the specified host denies the
+ // request (e.g. due to request throttling or DOS prevention), or if Google
+ // throttles requests to the site for abuse prevention. You should not
+ // depend on externally-hosted images for production applications.
+ //
+ // When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ // precedence.
+ string image_uri = 2;
+}
+
+// Client image to perform Google Cloud Vision API tasks over.
+message Image {
+ // Image content, represented as a stream of bytes.
+ // Note: As with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // Google Cloud Storage image location, or publicly-accessible image
+ // URL. If both `content` and `source` are provided for an image, `content`
+ // takes precedence and is used to perform the image annotation request.
+ ImageSource source = 2;
+}
+
+// A face annotation object contains the results of face detection.
+message FaceAnnotation {
+ // A face-specific landmark (for example, a face feature).
+ message Landmark {
+ // Face landmark (feature) type.
+ // Left and right are defined from the vantage of the viewer of the image
+ // without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ // typically, is the person's right eye.
+ enum Type {
+ // Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0;
+
+ // Left eye.
+ LEFT_EYE = 1;
+
+ // Right eye.
+ RIGHT_EYE = 2;
+
+ // Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3;
+
+ // Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4;
+
+ // Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5;
+
+ // Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6;
+
+ // Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7;
+
+ // Nose tip.
+ NOSE_TIP = 8;
+
+ // Upper lip.
+ UPPER_LIP = 9;
+
+ // Lower lip.
+ LOWER_LIP = 10;
+
+ // Mouth left.
+ MOUTH_LEFT = 11;
+
+ // Mouth right.
+ MOUTH_RIGHT = 12;
+
+ // Mouth center.
+ MOUTH_CENTER = 13;
+
+ // Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14;
+
+ // Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15;
+
+ // Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16;
+
+ // Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17;
+
+ // Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18;
+
+ // Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19;
+
+ // Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20;
+
+ // Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21;
+
+ // Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22;
+
+ // Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23;
+
+ // Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24;
+
+ // Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25;
+
+ // Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26;
+
+ // Left ear tragion.
+ LEFT_EAR_TRAGION = 27;
+
+ // Right ear tragion.
+ RIGHT_EAR_TRAGION = 28;
+
+ // Left eye pupil.
+ LEFT_EYE_PUPIL = 29;
+
+ // Right eye pupil.
+ RIGHT_EYE_PUPIL = 30;
+
+ // Forehead glabella.
+ FOREHEAD_GLABELLA = 31;
+
+ // Chin gnathion.
+ CHIN_GNATHION = 32;
+
+ // Chin left gonion.
+ CHIN_LEFT_GONION = 33;
+
+ // Chin right gonion.
+ CHIN_RIGHT_GONION = 34;
+ }
+
+ // Face landmark type.
+ Type type = 3;
+
+ // Face landmark position.
+ Position position = 4;
+ }
+
+ // The bounding polygon around the face. The coordinates of the bounding box
+ // are in the original image's scale, as returned in `ImageParams`.
+ // The bounding box is computed to "frame" the face in accordance with human
+ // expectations. It is based on the landmarker results.
+ // Note that one or more x and/or y coordinates may not be generated in the
+ // `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ // appears in the image to be annotated.
+ BoundingPoly bounding_poly = 1;
+
+ // The `fd_bounding_poly` bounding polygon is tighter than the
+ // `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ // is used to eliminate the face from any image analysis that detects the
+ // "amount of skin" visible in an image. It is not based on the
+ // landmarker results, only on the initial face detection, hence
+ // the <code>fd</code> (face detection) prefix.
+ BoundingPoly fd_bounding_poly = 2;
+
+ // Detected face landmarks.
+ repeated Landmark landmarks = 3;
+
+ // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ // of the face relative to the image vertical about the axis perpendicular to
+ // the face. Range [-180,180].
+ float roll_angle = 4;
+
+ // Yaw angle, which indicates the leftward/rightward angle that the face is
+ // pointing relative to the vertical plane perpendicular to the image. Range
+ // [-180,180].
+ float pan_angle = 5;
+
+ // Pitch angle, which indicates the upwards/downwards angle that the face is
+ // pointing relative to the image's horizontal plane. Range [-180,180].
+ float tilt_angle = 6;
+
+ // Detection confidence. Range [0, 1].
+ float detection_confidence = 7;
+
+ // Face landmarking confidence. Range [0, 1].
+ float landmarking_confidence = 8;
+
+ // Joy likelihood.
+ Likelihood joy_likelihood = 9;
+
+ // Sorrow likelihood.
+ Likelihood sorrow_likelihood = 10;
+
+ // Anger likelihood.
+ Likelihood anger_likelihood = 11;
+
+ // Surprise likelihood.
+ Likelihood surprise_likelihood = 12;
+
+ // Under-exposed likelihood.
+ Likelihood under_exposed_likelihood = 13;
+
+ // Blurred likelihood.
+ Likelihood blurred_likelihood = 14;
+
+ // Headwear likelihood.
+ Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+ // lat/long location coordinates.
+ google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+ // Name of the property.
+ string name = 1;
+
+ // Value of the property.
+ string value = 2;
+
+ // Value of numeric properties.
+ uint64 uint64_value = 3;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ string mid = 1;
+
+ // The language code for the locale in which the entity textual
+ // `description` is expressed.
+ string locale = 2;
+
+ // Entity textual description, expressed in its `locale` language.
+ string description = 3;
+
+ // Overall score of the result. Range [0, 1].
+ float score = 4;
+
+ // **Deprecated. Use `score` instead.**
+ // The accuracy of the entity detection in an image.
+ // For example, for an image in which the "Eiffel Tower" entity is detected,
+ // this field represents the confidence that there is a tower in the query
+ // image. Range [0, 1].
+ float confidence = 5;
+
+ // The relevancy of the ICA (Image Content Annotation) label to the
+ // image. For example, the relevancy of "tower" is likely higher to an image
+ // containing the detected "Eiffel Tower" than to an image containing a
+ // detected distant towering building, even though the confidence that
+ // there is a tower in each image may be the same. Range [0, 1].
+ float topicality = 6;
+
+ // Image region to which this entity belongs. Not produced
+ // for `LABEL_DETECTION` features.
+ BoundingPoly bounding_poly = 7;
+
+ // The location information for the detected entity. Multiple
+ // `LocationInfo` elements can be present because one location may
+ // indicate the location of the scene in the image, and another location
+ // may indicate the location of the place where the image was taken.
+ // Location information is usually present for landmarks.
+ repeated LocationInfo locations = 8;
+
+ // Some entities may have optional user-supplied `Property` (name/value)
+  // fields, such as a score or string that qualifies the entity.
+ repeated Property properties = 9;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+ // Represents the adult content likelihood for the image. Adult content may
+ // contain elements such as nudity, pornographic images or cartoons, or
+ // sexual activities.
+ Likelihood adult = 1;
+
+  // Spoof likelihood. The likelihood that a modification
+ // was made to the image's canonical version to make it appear
+ // funny or offensive.
+ Likelihood spoof = 2;
+
+ // Likelihood that this is a medical image.
+ Likelihood medical = 3;
+
+ // Likelihood that this image contains violent content.
+ Likelihood violence = 4;
+
+ // Likelihood that the request image contains racy content. Racy content may
+ // include (but is not limited to) skimpy or sheer clothing, strategically
+ // covered nudity, lewd or provocative poses, or close-ups of sensitive
+ // body areas.
+ Likelihood racy = 9;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+ // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1;
+
+ // Max lat/long pair.
+ google.type.LatLng max_lat_lng = 2;
+}
+
+// Color information consists of RGB channels, score, and the fraction of
+// the image that the color occupies in the image.
+message ColorInfo {
+ // RGB components of the color.
+ google.type.Color color = 1;
+
+ // Image-specific score for this color. Value in range [0, 1].
+ float score = 2;
+
+ // The fraction of pixels the color occupies in the image.
+ // Value in range [0, 1].
+ float pixel_fraction = 3;
+}
+
+// Set of dominant colors and their corresponding scores.
+message DominantColorsAnnotation {
+ // RGB color values with their score and pixel fraction.
+ repeated ColorInfo colors = 1;
+}
+
+// Stores image properties, such as dominant colors.
+message ImageProperties {
+ // If present, dominant colors completed successfully.
+ DominantColorsAnnotation dominant_colors = 1;
+}
+
+// Single crop hint that is used to generate a new crop when serving an image.
+message CropHint {
+ // The bounding polygon for the crop region. The coordinates of the bounding
+ // box are in the original image's scale, as returned in `ImageParams`.
+ BoundingPoly bounding_poly = 1;
+
+ // Confidence of this being a salient region. Range [0, 1].
+ float confidence = 2;
+
+ // Fraction of importance of this salient region with respect to the original
+ // image.
+ float importance_fraction = 3;
+}
+
+// Set of crop hints that are used to generate new crops when serving images.
+message CropHintsAnnotation {
+ // Crop hint results.
+ repeated CropHint crop_hints = 1;
+}
+
+// Parameters for crop hints annotation request.
+message CropHintsParams {
+ // Aspect ratios in floats, representing the ratio of the width to the height
+ // of the image. For example, if the desired aspect ratio is 4/3, the
+ // corresponding float value should be 1.33333. If not specified, the
+ // best possible crop is returned. The number of provided aspect ratios is
+ // limited to a maximum of 16; any aspect ratios provided after the 16th are
+ // ignored.
+ repeated float aspect_ratios = 1;
+}
+
+// Parameters for web detection request.
+message WebDetectionParams {
+ // Whether to include results derived from the geo information in the image.
+ bool include_geo_results = 2;
+}
+
+// Image context and/or feature-specific parameters.
+message ImageContext {
+ // Not used.
+ LatLongRect lat_long_rect = 1;
+
+ // List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ // yields the best results since it enables automatic language detection. For
+ // languages based on the Latin alphabet, setting `language_hints` is not
+ // needed. In rare cases, when the language of the text in the image is known,
+ // setting a hint will help get better results (although it will be a
+ // significant hindrance if the hint is wrong). Text detection returns an
+ // error if one or more of the specified languages is not one of the
+ // [supported languages](/vision/docs/languages).
+ repeated string language_hints = 2;
+
+ // Parameters for crop hints annotation request.
+ CropHintsParams crop_hints_params = 4;
+
+ // Parameters for web detection.
+ WebDetectionParams web_detection_params = 6;
+}
+
+// Request for performing Google Cloud Vision API tasks over a user-provided
+// image, with user-requested features.
+message AnnotateImageRequest {
+ // The image to be processed.
+ Image image = 1;
+
+ // Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image.
+ ImageContext image_context = 3;
+}
+
+// If an image was produced from a file (e.g. a PDF), this message gives
+// information about the source of that image.
+message ImageAnnotationContext {
+ // The URI of the file used to produce the image.
+ string uri = 1;
+
+ // If the file was a PDF or TIFF, this field gives the page number within
+ // the file used to produce the image.
+ int32 page_number = 2;
+}
+
+// Response to an image annotation request.
+message AnnotateImageResponse {
+ // If present, face detection has completed successfully.
+ repeated FaceAnnotation face_annotations = 1;
+
+ // If present, landmark detection has completed successfully.
+ repeated EntityAnnotation landmark_annotations = 2;
+
+ // If present, logo detection has completed successfully.
+ repeated EntityAnnotation logo_annotations = 3;
+
+ // If present, label detection has completed successfully.
+ repeated EntityAnnotation label_annotations = 4;
+
+ // If present, text (OCR) detection has completed successfully.
+ repeated EntityAnnotation text_annotations = 5;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ // This annotation provides the structural hierarchy for the OCR detected
+ // text.
+ TextAnnotation full_text_annotation = 12;
+
+ // If present, safe-search annotation has completed successfully.
+ SafeSearchAnnotation safe_search_annotation = 6;
+
+ // If present, image properties were extracted successfully.
+ ImageProperties image_properties_annotation = 8;
+
+ // If present, crop hints have completed successfully.
+ CropHintsAnnotation crop_hints_annotation = 11;
+
+ // If present, web detection has completed successfully.
+ WebDetection web_detection = 13;
+
+ // If set, represents the error message for the operation.
+ // Note that filled-in image annotations are guaranteed to be
+ // correct, even when `error` is set.
+ google.rpc.Status error = 9;
+
+ // If present, contextual information is needed to understand where this image
+ // comes from.
+ ImageAnnotationContext context = 21;
+}
+
+// Response to a single file annotation request. A file may contain one or more
+// images, which individually have their own responses.
+message AnnotateFileResponse {
+ // Information about the file for which this response is generated.
+ InputConfig input_config = 1;
+
+ // Individual responses to images found within the file.
+ repeated AnnotateImageResponse responses = 2;
+}
+
+// Multiple image annotation requests are batched into a single service call.
+message BatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+}
+
+// Response to a batch image annotation request.
+message BatchAnnotateImagesResponse {
+ // Individual responses to image annotation requests within the batch.
+ repeated AnnotateImageResponse responses = 1;
+}
+
+// An offline file annotation request.
+message AsyncAnnotateFileRequest {
+ // Required. Information about the input file.
+ InputConfig input_config = 1;
+
+ // Required. Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image(s) in the file.
+ ImageContext image_context = 3;
+
+ // Required. The desired output location and metadata (e.g. format).
+ OutputConfig output_config = 4;
+}
+
+// The response for a single offline file annotation request.
+message AsyncAnnotateFileResponse {
+ // The output location and metadata from AsyncAnnotateFileRequest.
+ OutputConfig output_config = 1;
+}
+
+// Multiple async file annotation requests are batched into a single service
+// call.
+message AsyncBatchAnnotateFilesRequest {
+ // Individual async file annotation requests for this batch.
+ repeated AsyncAnnotateFileRequest requests = 1;
+}
+
+// Response to an async batch file annotation request.
+message AsyncBatchAnnotateFilesResponse {
+ // The list of file annotation responses, one for each request in
+ // AsyncBatchAnnotateFilesRequest.
+ repeated AsyncAnnotateFileResponse responses = 1;
+}
+
+// The desired input location and metadata.
+message InputConfig {
+ // The Google Cloud Storage location to read the input from.
+ GcsSource gcs_source = 1;
+
+ // The type of the file. Currently only "application/pdf" and "image/tiff"
+ // are supported. Wildcards are not supported.
+ string mime_type = 2;
+}
+
+// The desired output location and metadata.
+message OutputConfig {
+ // The Google Cloud Storage location to write the output(s) to.
+ GcsDestination gcs_destination = 1;
+
+ // The max number of response protos to put into each output JSON file on GCS.
+ // The valid range is [1, 100]. If not specified, the default value is 20.
+ //
+ // For example, for one pdf file with 100 pages, 100 response protos will
+ // be generated. If `batch_size` = 20, then 5 json files each
+ // containing 20 response protos will be written under the prefix
+ // `gcs_destination`.`uri`.
+ //
+ // Currently, batch_size only applies to GcsDestination, with potential future
+ // support for other output configurations.
+ int32 batch_size = 2;
+}
+
+// The Google Cloud Storage location where the input will be read from.
+message GcsSource {
+ // Google Cloud Storage URI for the input file. This must only be a GCS
+ // object. Wildcards are not currently supported.
+ string uri = 1;
+}
+
+// The Google Cloud Storage location where the output will be written to.
+message GcsDestination {
+ // Google Cloud Storage URI where the results will be stored. Results will
+ // be in JSON format and preceded by its corresponding input URI. This field
+ // can either represent a single file, or a prefix for multiple outputs.
+ // Prefixes must end in a `/`.
+ //
+ // Examples:
+ //
+ // * File: gs://bucket-name/filename.json
+ // * Prefix: gs://bucket-name/prefix/here/
+ // * File: gs://bucket-name/prefix/here
+ //
+ // If multiple outputs, each response is still AnnotateFileResponse, each of
+ // which contains some subset of the full list of AnnotateImageResponse.
+ // Multiple outputs can happen if, for example, the output JSON is too large
+ // and overflows into multiple sharded files.
+ string uri = 1;
+}
+
+// Contains metadata for the BatchAnnotateImages operation.
+message OperationMetadata {
+ // Batch operation states.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is received.
+ CREATED = 1;
+
+ // Request is actively being processed.
+ RUNNING = 2;
+
+ // The batch processing is done.
+ DONE = 3;
+
+ // The batch processing was cancelled.
+ CANCELLED = 4;
+ }
+
+ // Current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was received.
+ google.protobuf.Timestamp create_time = 5;
+
+ // The time when the operation result was last updated.
+ google.protobuf.Timestamp update_time = 6;
+}
+
+// A bucketized representation of likelihood, which is intended to give clients
+// highly stable results across model upgrades.
+enum Likelihood {
+ // Unknown likelihood.
+ UNKNOWN = 0;
+
+ // It is very unlikely that the image belongs to the specified vertical.
+ VERY_UNLIKELY = 1;
+
+ // It is unlikely that the image belongs to the specified vertical.
+ UNLIKELY = 2;
+
+ // It is possible that the image belongs to the specified vertical.
+ POSSIBLE = 3;
+
+ // It is likely that the image belongs to the specified vertical.
+ LIKELY = 4;
+
+ // It is very likely that the image belongs to the specified vertical.
+ VERY_LIKELY = 5;
+}
diff --git a/google/cloud/vision/v1p2beta1/text_annotation.proto b/google/cloud/vision/v1p2beta1/text_annotation.proto
new file mode 100644
index 000000000..b35eb47e8
--- /dev/null
+++ b/google/cloud/vision/v1p2beta1/text_annotation.proto
@@ -0,0 +1,259 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p2beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p2beta1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1p2beta1";
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have their own
+// properties. Properties describe detected languages, breaks etc. Please refer
+// to the
+// [TextAnnotation.TextProperty][google.cloud.vision.v1p2beta1.TextAnnotation.TextProperty]
+// message definition below for more detail.
+message TextAnnotation {
+ // Detected language for a structural component.
+ message DetectedLanguage {
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 1;
+
+ // Confidence of detected language. Range [0, 1].
+ float confidence = 2;
+ }
+
+ // Detected start or end of a structural component.
+ message DetectedBreak {
+ // Enum to denote the type of break found. New line, space etc.
+ enum BreakType {
+ // Unknown break label type.
+ UNKNOWN = 0;
+
+ // Regular space.
+ SPACE = 1;
+
+ // Sure space (very wide).
+ SURE_SPACE = 2;
+
+ // Line-wrapping break.
+ EOL_SURE_SPACE = 3;
+
+ // End-line hyphen that is not present in text; does not co-occur with
+ // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ HYPHEN = 4;
+
+ // Line break that ends a paragraph.
+ LINE_BREAK = 5;
+ }
+
+ // Detected break type.
+ BreakType type = 1;
+
+ // True if break prepends the element.
+ bool is_prefix = 2;
+ }
+
+ // Additional information detected on the structural component.
+ message TextProperty {
+ // A list of detected languages together with confidence.
+ repeated DetectedLanguage detected_languages = 1;
+
+ // Detected start or end of a text segment.
+ DetectedBreak detected_break = 2;
+ }
+
+ // List of pages detected by OCR.
+ repeated Page pages = 1;
+
+ // UTF-8 text detected on the pages.
+ string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+ // Additional information detected on the page.
+ TextAnnotation.TextProperty property = 1;
+
+ // Page width. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 width = 2;
+
+ // Page height. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 height = 3;
+
+ // List of blocks of text, images etc on this page.
+ repeated Block blocks = 4;
+
+ // Confidence of the OCR results on the page. Range [0, 1].
+ float confidence = 5;
+}
+
+// Logical element on the page.
+message Block {
+ // Type of a block (text, image etc) as identified by OCR.
+ enum BlockType {
+ // Unknown block type.
+ UNKNOWN = 0;
+
+ // Regular text block.
+ TEXT = 1;
+
+ // Table block.
+ TABLE = 2;
+
+ // Image block.
+ PICTURE = 3;
+
+ // Horizontal/vertical line box.
+ RULER = 4;
+
+ // Barcode block.
+ BARCODE = 5;
+ }
+
+ // Additional information detected for the block.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the block.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ //
+ // * when the text is horizontal it might look like:
+ //
+ // 0----1
+ // | |
+ // 3----2
+ //
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ //
+ // 2----3
+ // | |
+ // 1----0
+ //
+  // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+  // List of paragraphs in this block (if this block is of type text).
+ repeated Paragraph paragraphs = 3;
+
+ // Detected block type (text, image etc) for this block.
+ BlockType block_type = 4;
+
+ // Confidence of the OCR results on the block. Range [0, 1].
+ float confidence = 5;
+}
+
+// Structural unit of text representing a number of words in certain order.
+message Paragraph {
+ // Additional information detected for the paragraph.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the paragraph.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of words in this paragraph.
+ repeated Word words = 3;
+
+ // Confidence of the OCR results for the paragraph. Range [0, 1].
+ float confidence = 4;
+}
+
+// A word representation.
+message Word {
+ // Additional information detected for the word.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the word.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of symbols in the word.
+ // The order of the symbols follows the natural reading order.
+ repeated Symbol symbols = 3;
+
+ // Confidence of the OCR results for the word. Range [0, 1].
+ float confidence = 4;
+}
+
+// A single symbol representation.
+message Symbol {
+ // Additional information detected for the symbol.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the symbol.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // The actual UTF-8 representation of the symbol.
+ string text = 3;
+
+ // Confidence of the OCR results for the symbol. Range [0, 1].
+ float confidence = 4;
+}
diff --git a/google/cloud/vision/v1p2beta1/vision_gapic.yaml b/google/cloud/vision/v1p2beta1/vision_gapic.yaml
new file mode 100644
index 000000000..41d5c49b4
--- /dev/null
+++ b/google/cloud/vision/v1p2beta1/vision_gapic.yaml
@@ -0,0 +1,72 @@
+type: com.google.api.codegen.ConfigProto
+config_schema_version: 1.0.0
+language_settings:
+ java:
+ package_name: com.google.cloud.vision.v1p2beta1
+ python:
+ package_name: google.cloud.vision_v1p2beta1.gapic
+ go:
+ package_name: cloud.google.com/go/vision/apiv1p2beta1
+ release_level: BETA
+ csharp:
+ package_name: Google.Cloud.Vision.V1P2Beta1
+ release_level: BETA
+ ruby:
+ package_name: Google::Cloud::Vision::V1p2beta1
+ php:
+ package_name: Google\Cloud\Vision\V1p2beta1
+ nodejs:
+ package_name: vision.v1p2beta1
+ domain_layer_location: google-cloud
+interfaces:
+- name: google.cloud.vision.v1p2beta1.ImageAnnotator
+ smoke_test:
+ method: BatchAnnotateImages
+ init_fields:
+ - requests[0].image.source.gcs_image_uri="gs://gapic-toolkit/President_Barack_Obama.jpg"
+ - requests[0].features[0].type=FACE_DETECTION
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes: []
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: BatchAnnotateImages
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: AsyncBatchAnnotateFiles
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ long_running:
+ return_type: google.cloud.vision.v1p2beta1.AsyncBatchAnnotateFilesResponse
+ metadata_type: google.cloud.vision.v1p2beta1.OperationMetadata
+ initial_poll_delay_millis: 20000
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 45000
+ total_poll_timeout_millis: 86400000
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
diff --git a/google/cloud/vision/v1p2beta1/web_detection.proto b/google/cloud/vision/v1p2beta1/web_detection.proto
new file mode 100644
index 000000000..f2b9dfef0
--- /dev/null
+++ b/google/cloud/vision/v1p2beta1/web_detection.proto
@@ -0,0 +1,104 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p2beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1p2beta1";
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+ // Entity deduced from similar images on the Internet.
+ message WebEntity {
+ // Opaque entity ID.
+ string entity_id = 1;
+
+ // Overall relevancy score for the entity.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+
+ // Canonical description of the entity, in English.
+ string description = 3;
+ }
+
+ // Metadata for online images.
+ message WebImage {
+ // The result image URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the image.
+ float score = 2;
+ }
+
+ // Metadata for web pages.
+ message WebPage {
+ // The result web page URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the web page.
+ float score = 2;
+
+ // Title for the web page, may contain HTML markups.
+ string page_title = 3;
+
+ // Fully matching images on the page.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 4;
+
+ // Partial matching images on the page.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its
+ // crops.
+ repeated WebImage partial_matching_images = 5;
+ }
+
+ // Label to provide extra metadata for the web detection.
+ message WebLabel {
+ // Label for extra metadata.
+ string label = 1;
+
+ // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ // For more information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+ }
+
+ // Deduced entities from similar images on the Internet.
+ repeated WebEntity web_entities = 1;
+
+ // Fully matching images from the Internet.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 2;
+
+ // Partial matching images from the Internet.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its crops.
+ repeated WebImage partial_matching_images = 3;
+
+ // Web pages containing the matching images from the Internet.
+ repeated WebPage pages_with_matching_images = 4;
+
+ // The visually similar image results.
+ repeated WebImage visually_similar_images = 6;
+
+ // Best guess text labels for the request image.
+ repeated WebLabel best_guess_labels = 8;
+}
diff --git a/google/cloud/vision/v1p3beta1/BUILD.bazel b/google/cloud/vision/v1p3beta1/BUILD.bazel
new file mode 100644
index 000000000..05aeded73
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/BUILD.bazel
@@ -0,0 +1,158 @@
+# This is an API workspace, having public visibility by default makes perfect sense.
+package(default_visibility = ["//visibility:public"])
+
+##############################################################################
+# Common
+##############################################################################
+load("@com_google_api_codegen//rules_gapic:gapic.bzl", "proto_library_with_info")
+
+proto_library(
+ name = "vision_proto",
+ srcs = [
+ "geometry.proto",
+ "image_annotator.proto",
+ "product_search.proto",
+ "product_search_service.proto",
+ "text_annotation.proto",
+ "web_detection.proto",
+ ],
+ deps = [
+ "//google/api:annotations_proto",
+ "//google/longrunning:operations_proto",
+ "//google/rpc:status_proto",
+ "//google/type:color_proto",
+ "//google/type:latlng_proto",
+ "@com_google_protobuf//:empty_proto",
+ "@com_google_protobuf//:field_mask_proto",
+ "@com_google_protobuf//:timestamp_proto",
+ ],
+)
+
+proto_library_with_info(
+ name = "vision_proto_with_info",
+ deps = [":vision_proto"],
+)
+
+##############################################################################
+# Java
+##############################################################################
+load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library")
+load(
+ "@com_google_api_codegen//rules_gapic/java:java_gapic.bzl",
+ "java_gapic_library",
+ "java_resource_name_proto_library",
+)
+load("@com_google_api_codegen//rules_gapic/java:java_gapic_pkg.bzl", "java_gapic_assembly_gradle_pkg")
+
+_JAVA_GRPC_DEPS = [
+ "@com_google_api_grpc_proto_google_common_protos//jar",
+]
+
+java_proto_library(
+ name = "vision_java_proto",
+ deps = [":vision_proto"],
+)
+
+java_grpc_library(
+ name = "vision_java_grpc",
+ srcs = [":vision_proto"],
+ deps = [":vision_java_proto"] + _JAVA_GRPC_DEPS,
+)
+
+java_resource_name_proto_library(
+ name = "vision_resource_name_java_proto",
+ gapic_yaml = "vision_gapic.yaml",
+ deps = [":vision_proto"],
+)
+
+java_gapic_library(
+ name = "vision_java_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ service_yaml = "//google/cloud/vision:vision_v1p3beta1.yaml",
+ test_deps = [":vision_java_grpc"],
+ deps = [
+ ":vision_java_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+)
+
+[java_test(
+ name = test_name,
+ test_class = test_name,
+ runtime_deps = [
+ ":vision_java_gapic_test",
+ ],
+) for test_name in [
+ "com.google.cloud.vision.v1p3beta1.ImageAnnotatorClientTest",
+]]
+
+# Opensource Packages
+java_gapic_assembly_gradle_pkg(
+ name = "google-cloud-vision-v1p3beta1-java",
+ client_deps = [":vision_java_gapic"],
+ client_group = "com.google.cloud",
+ client_test_deps = [":vision_java_gapic_test"],
+ grpc_deps = [":vision_java_grpc"],
+ grpc_group = "com.google.api.grpc",
+ proto_deps = [
+ ":vision_java_proto",
+ ":vision_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+ version = "0.0.0-SNAPSHOT",
+)
+
+##############################################################################
+# Go
+##############################################################################
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic.bzl", "go_gapic_srcjar", "go_gapic_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic_pkg.bzl", "go_gapic_assembly_pkg")
+
+go_proto_library(
+ name = "vision_go_proto",
+ compilers = ["@io_bazel_rules_go//proto:go_grpc"],
+ importpath = "google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1",
+ protos = [":vision_proto_with_info"],
+ deps = [
+ "//google/api:annotations_go_proto",
+ "//google/longrunning:longrunning_go_proto",
+ "//google/rpc:status_go_proto",
+ "//google/type:color_go_proto",
+ "//google/type:latlng_go_proto",
+ ],
+)
+
+go_gapic_library(
+ name = "vision_go_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ importpath = "cloud.google.com/go/vision/apiv1p3beta1",
+ service_yaml = "//google/cloud/vision:vision_v1p3beta1.yaml",
+ deps = [
+ ":vision_go_proto",
+ "//google/longrunning:longrunning_go_gapic",
+ "//google/longrunning:longrunning_go_proto",
+ "@com_google_cloud_go//longrunning:go_default_library",
+ ],
+)
+
+go_test(
+ name = "vision_go_gapic_test",
+ srcs = [":vision_go_gapic_srcjar_test"],
+ embed = [":vision_go_gapic"],
+ importpath = "cloud.google.com/go/vision/apiv1p3beta1",
+)
+
+# Opensource Packages
+go_gapic_assembly_pkg(
+ name = "gapi-cloud-vision-v1p3beta1-go",
+ deps = [
+ ":vision_go_gapic",
+ ":vision_go_gapic_srcjar-smoke-test.srcjar",
+ ":vision_go_gapic_srcjar-test.srcjar",
+ ":vision_go_proto",
+ ],
+) \ No newline at end of file
diff --git a/google/cloud/vision/v1p3beta1/geometry.proto b/google/cloud/vision/v1p3beta1/geometry.proto
new file mode 100644
index 000000000..28b768eaa
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/geometry.proto
@@ -0,0 +1,75 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p3beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "GeometryProto";
+option java_package = "com.google.cloud.vision.v1p3beta1";
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
+
+// A bounding polygon for the detected image annotation.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+
+ // The bounding polygon normalized vertices.
+ repeated NormalizedVertex normalized_vertices = 2;
+}
+
+// A normalized bounding polygon around a portion of an image.
+message NormalizedBoundingPoly {
+ // Normalized vertices of the bounding polygon.
+ repeated NormalizedVertex vertices = 1;
+}
+
+// A 3D position in the image, used primarily for Face detection landmarks.
+// A valid Position must have both x and y coordinates.
+// The position coordinates are in the same scale as the original image.
+message Position {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+
+ // Z coordinate (or depth).
+ float z = 3;
+}
diff --git a/google/cloud/vision/v1p3beta1/image_annotator.proto b/google/cloud/vision/v1p3beta1/image_annotator.proto
new file mode 100644
index 000000000..2ac3ddb8b
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/image_annotator.proto
@@ -0,0 +1,803 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p3beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p3beta1/geometry.proto";
+import "google/cloud/vision/v1p3beta1/product_search.proto";
+import "google/cloud/vision/v1p3beta1/text_annotation.proto";
+import "google/cloud/vision/v1p3beta1/web_detection.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/type/color.proto";
+import "google/type/latlng.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ImageAnnotatorProto";
+option java_package = "com.google.cloud.vision.v1p3beta1";
+
+// Service that performs Google Cloud Vision API detection tasks over client
+// images, such as face, landmark, logo, label, and text detection. The
+// ImageAnnotator service returns detected entities from the images.
+service ImageAnnotator {
+ // Run image detection and annotation for a batch of images.
+ rpc BatchAnnotateImages(BatchAnnotateImagesRequest)
+ returns (BatchAnnotateImagesResponse) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/images:annotate"
+ body: "*"
+ };
+ }
+
+ // Run asynchronous image detection and annotation for a list of generic
+ // files, such as PDF files, which may contain multiple pages and multiple
+ // images per page. Progress and results can be retrieved through the
+ // `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `OperationMetadata` (metadata).
+ // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
+ rpc AsyncBatchAnnotateFiles(AsyncBatchAnnotateFilesRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/files:asyncBatchAnnotate"
+ body: "*"
+ };
+ }
+}
+
+// The type of Google Cloud Vision API detection to perform, and the maximum
+// number of results to return for that type. Multiple `Feature` objects can
+// be specified in the `features` list.
+message Feature {
+ // Type of Google Cloud Vision API feature to be extracted.
+ enum Type {
+ // Unspecified feature type.
+ TYPE_UNSPECIFIED = 0;
+
+ // Run face detection.
+ FACE_DETECTION = 1;
+
+ // Run landmark detection.
+ LANDMARK_DETECTION = 2;
+
+ // Run logo detection.
+ LOGO_DETECTION = 3;
+
+ // Run label detection.
+ LABEL_DETECTION = 4;
+
+ // Run text detection / optical character recognition (OCR). Text detection
+ // is optimized for areas of text within a larger image; if the image is
+ // a document, use `DOCUMENT_TEXT_DETECTION` instead.
+ TEXT_DETECTION = 5;
+
+ // Run dense text document OCR. Takes precedence when both
+ // `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
+ DOCUMENT_TEXT_DETECTION = 11;
+
+ // Run Safe Search to detect potentially unsafe
+ // or undesirable content.
+ SAFE_SEARCH_DETECTION = 6;
+
+ // Compute a set of image properties, such as the
+ // image's dominant colors.
+ IMAGE_PROPERTIES = 7;
+
+ // Run crop hints.
+ CROP_HINTS = 9;
+
+ // Run web detection.
+ WEB_DETECTION = 10;
+
+ // Run Product Search.
+ PRODUCT_SEARCH = 12;
+
+ // Run localizer for object detection.
+ OBJECT_LOCALIZATION = 19;
+ }
+
+ // The feature type.
+ Type type = 1;
+
+ // Maximum number of results of this type. Does not apply to
+ // `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ int32 max_results = 2;
+
+ // Model to use for the feature.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 3;
+}
+
+// External image source (Google Cloud Storage or web URL image location).
+message ImageSource {
+ // **Use `image_uri` instead.**
+ //
+ // The Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
+ string gcs_image_uri = 1;
+
+ // The URI of the source image. Can be either:
+ //
+ // 1. A Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more
+ // info.
+ //
+ // 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
+ // HTTP/HTTPS URLs, Google cannot guarantee that the request will be
+ // completed. Your request may fail if the specified host denies the
+ // request (e.g. due to request throttling or DOS prevention), or if Google
+ // throttles requests to the site for abuse prevention. You should not
+ // depend on externally-hosted images for production applications.
+ //
+ // When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ // precedence.
+ string image_uri = 2;
+}
+
+// Client image to perform Google Cloud Vision API tasks over.
+message Image {
+ // Image content, represented as a stream of bytes.
+  // Note: As with all `bytes` fields, protocol buffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // Google Cloud Storage image location, or publicly-accessible image
+ // URL. If both `content` and `source` are provided for an image, `content`
+ // takes precedence and is used to perform the image annotation request.
+ ImageSource source = 2;
+}
+
+// A face annotation object contains the results of face detection.
+message FaceAnnotation {
+ // A face-specific landmark (for example, a face feature).
+ message Landmark {
+ // Face landmark (feature) type.
+ // Left and right are defined from the vantage of the viewer of the image
+ // without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ // typically, is the person's right eye.
+ enum Type {
+ // Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0;
+
+ // Left eye.
+ LEFT_EYE = 1;
+
+ // Right eye.
+ RIGHT_EYE = 2;
+
+ // Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3;
+
+ // Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4;
+
+ // Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5;
+
+ // Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6;
+
+ // Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7;
+
+ // Nose tip.
+ NOSE_TIP = 8;
+
+ // Upper lip.
+ UPPER_LIP = 9;
+
+ // Lower lip.
+ LOWER_LIP = 10;
+
+ // Mouth left.
+ MOUTH_LEFT = 11;
+
+ // Mouth right.
+ MOUTH_RIGHT = 12;
+
+ // Mouth center.
+ MOUTH_CENTER = 13;
+
+ // Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14;
+
+ // Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15;
+
+ // Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16;
+
+ // Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17;
+
+ // Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18;
+
+ // Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19;
+
+ // Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20;
+
+ // Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21;
+
+ // Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22;
+
+ // Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23;
+
+ // Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24;
+
+ // Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25;
+
+ // Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26;
+
+ // Left ear tragion.
+ LEFT_EAR_TRAGION = 27;
+
+ // Right ear tragion.
+ RIGHT_EAR_TRAGION = 28;
+
+ // Left eye pupil.
+ LEFT_EYE_PUPIL = 29;
+
+ // Right eye pupil.
+ RIGHT_EYE_PUPIL = 30;
+
+ // Forehead glabella.
+ FOREHEAD_GLABELLA = 31;
+
+ // Chin gnathion.
+ CHIN_GNATHION = 32;
+
+ // Chin left gonion.
+ CHIN_LEFT_GONION = 33;
+
+ // Chin right gonion.
+ CHIN_RIGHT_GONION = 34;
+ }
+
+ // Face landmark type.
+ Type type = 3;
+
+ // Face landmark position.
+ Position position = 4;
+ }
+
+ // The bounding polygon around the face. The coordinates of the bounding box
+ // are in the original image's scale, as returned in `ImageParams`.
+ // The bounding box is computed to "frame" the face in accordance with human
+ // expectations. It is based on the landmarker results.
+ // Note that one or more x and/or y coordinates may not be generated in the
+ // `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ // appears in the image to be annotated.
+ BoundingPoly bounding_poly = 1;
+
+ // The `fd_bounding_poly` bounding polygon is tighter than the
+ // `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ // is used to eliminate the face from any image analysis that detects the
+ // "amount of skin" visible in an image. It is not based on the
+ // landmarker results, only on the initial face detection, hence
+ // the <code>fd</code> (face detection) prefix.
+ BoundingPoly fd_bounding_poly = 2;
+
+ // Detected face landmarks.
+ repeated Landmark landmarks = 3;
+
+ // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ // of the face relative to the image vertical about the axis perpendicular to
+ // the face. Range [-180,180].
+ float roll_angle = 4;
+
+ // Yaw angle, which indicates the leftward/rightward angle that the face is
+ // pointing relative to the vertical plane perpendicular to the image. Range
+ // [-180,180].
+ float pan_angle = 5;
+
+ // Pitch angle, which indicates the upwards/downwards angle that the face is
+ // pointing relative to the image's horizontal plane. Range [-180,180].
+ float tilt_angle = 6;
+
+ // Detection confidence. Range [0, 1].
+ float detection_confidence = 7;
+
+ // Face landmarking confidence. Range [0, 1].
+ float landmarking_confidence = 8;
+
+ // Joy likelihood.
+ Likelihood joy_likelihood = 9;
+
+ // Sorrow likelihood.
+ Likelihood sorrow_likelihood = 10;
+
+ // Anger likelihood.
+ Likelihood anger_likelihood = 11;
+
+ // Surprise likelihood.
+ Likelihood surprise_likelihood = 12;
+
+ // Under-exposed likelihood.
+ Likelihood under_exposed_likelihood = 13;
+
+ // Blurred likelihood.
+ Likelihood blurred_likelihood = 14;
+
+ // Headwear likelihood.
+ Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+ // lat/long location coordinates.
+ google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+ // Name of the property.
+ string name = 1;
+
+ // Value of the property.
+ string value = 2;
+
+ // Value of numeric properties.
+ uint64 uint64_value = 3;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ string mid = 1;
+
+ // The language code for the locale in which the entity textual
+ // `description` is expressed.
+ string locale = 2;
+
+ // Entity textual description, expressed in its `locale` language.
+ string description = 3;
+
+ // Overall score of the result. Range [0, 1].
+ float score = 4;
+
+ // **Deprecated. Use `score` instead.**
+ // The accuracy of the entity detection in an image.
+ // For example, for an image in which the "Eiffel Tower" entity is detected,
+ // this field represents the confidence that there is a tower in the query
+ // image. Range [0, 1].
+ float confidence = 5;
+
+ // The relevancy of the ICA (Image Content Annotation) label to the
+ // image. For example, the relevancy of "tower" is likely higher to an image
+ // containing the detected "Eiffel Tower" than to an image containing a
+ // detected distant towering building, even though the confidence that
+ // there is a tower in each image may be the same. Range [0, 1].
+ float topicality = 6;
+
+ // Image region to which this entity belongs. Not produced
+ // for `LABEL_DETECTION` features.
+ BoundingPoly bounding_poly = 7;
+
+ // The location information for the detected entity. Multiple
+ // `LocationInfo` elements can be present because one location may
+ // indicate the location of the scene in the image, and another location
+ // may indicate the location of the place where the image was taken.
+ // Location information is usually present for landmarks.
+ repeated LocationInfo locations = 8;
+
+ // Some entities may have optional user-supplied `Property` (name/value)
+  // fields, such as a score or string that qualifies the entity.
+ repeated Property properties = 9;
+}
+
+// Set of detected objects with bounding boxes.
+message LocalizedObjectAnnotation {
+ // Object ID that should align with EntityAnnotation mid.
+ string mid = 1;
+
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+
+ // Object name, expressed in its `language_code` language.
+ string name = 3;
+
+ // Score of the result. Range [0, 1].
+ float score = 4;
+
+ // Image region to which this object belongs. This must be populated.
+ BoundingPoly bounding_poly = 5;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+ // Represents the adult content likelihood for the image. Adult content may
+ // contain elements such as nudity, pornographic images or cartoons, or
+ // sexual activities.
+ Likelihood adult = 1;
+
+  // Spoof likelihood. The likelihood that a modification
+ // was made to the image's canonical version to make it appear
+ // funny or offensive.
+ Likelihood spoof = 2;
+
+ // Likelihood that this is a medical image.
+ Likelihood medical = 3;
+
+ // Likelihood that this image contains violent content.
+ Likelihood violence = 4;
+
+ // Likelihood that the request image contains racy content. Racy content may
+ // include (but is not limited to) skimpy or sheer clothing, strategically
+ // covered nudity, lewd or provocative poses, or close-ups of sensitive
+ // body areas.
+ Likelihood racy = 9;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+ // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1;
+
+ // Max lat/long pair.
+ google.type.LatLng max_lat_lng = 2;
+}
+
+// Color information consists of RGB channels, score, and the fraction of
+// the image that the color occupies in the image.
+message ColorInfo {
+ // RGB components of the color.
+ google.type.Color color = 1;
+
+ // Image-specific score for this color. Value in range [0, 1].
+ float score = 2;
+
+ // The fraction of pixels the color occupies in the image.
+ // Value in range [0, 1].
+ float pixel_fraction = 3;
+}
+
+// Set of dominant colors and their corresponding scores.
+message DominantColorsAnnotation {
+ // RGB color values with their score and pixel fraction.
+ repeated ColorInfo colors = 1;
+}
+
+// Stores image properties, such as dominant colors.
+message ImageProperties {
+ // If present, dominant colors completed successfully.
+ DominantColorsAnnotation dominant_colors = 1;
+}
+
+// Single crop hint that is used to generate a new crop when serving an image.
+message CropHint {
+ // The bounding polygon for the crop region. The coordinates of the bounding
+ // box are in the original image's scale, as returned in `ImageParams`.
+ BoundingPoly bounding_poly = 1;
+
+ // Confidence of this being a salient region. Range [0, 1].
+ float confidence = 2;
+
+ // Fraction of importance of this salient region with respect to the original
+ // image.
+ float importance_fraction = 3;
+}
+
+// Set of crop hints that are used to generate new crops when serving images.
+message CropHintsAnnotation {
+ // Crop hint results.
+ repeated CropHint crop_hints = 1;
+}
+
+// Parameters for crop hints annotation request.
+message CropHintsParams {
+ // Aspect ratios in floats, representing the ratio of the width to the height
+ // of the image. For example, if the desired aspect ratio is 4/3, the
+ // corresponding float value should be 1.33333. If not specified, the
+ // best possible crop is returned. The number of provided aspect ratios is
+ // limited to a maximum of 16; any aspect ratios provided after the 16th are
+ // ignored.
+ repeated float aspect_ratios = 1;
+}
+
+// Parameters for web detection request.
+message WebDetectionParams {
+ // Whether to include results derived from the geo information in the image.
+ bool include_geo_results = 2;
+}
+
+// Image context and/or feature-specific parameters.
+message ImageContext {
+ // Not used.
+ LatLongRect lat_long_rect = 1;
+
+ // List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ // yields the best results since it enables automatic language detection. For
+ // languages based on the Latin alphabet, setting `language_hints` is not
+ // needed. In rare cases, when the language of the text in the image is known,
+ // setting a hint will help get better results (although it will be a
+ // significant hindrance if the hint is wrong). Text detection returns an
+ // error if one or more of the specified languages is not one of the
+ // [supported languages](/vision/docs/languages).
+ repeated string language_hints = 2;
+
+ // Parameters for crop hints annotation request.
+ CropHintsParams crop_hints_params = 4;
+
+ // Parameters for product search.
+ google.cloud.vision.v1p3beta1.ProductSearchParams product_search_params = 5;
+
+ // Parameters for web detection.
+ WebDetectionParams web_detection_params = 6;
+}
+
+// Request for performing Google Cloud Vision API tasks over a user-provided
+// image, with user-requested features.
+message AnnotateImageRequest {
+ // The image to be processed.
+ Image image = 1;
+
+ // Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image.
+ ImageContext image_context = 3;
+}
+
+// If an image was produced from a file (e.g. a PDF), this message gives
+// information about the source of that image.
+message ImageAnnotationContext {
+ // The URI of the file used to produce the image.
+ string uri = 1;
+
+ // If the file was a PDF or TIFF, this field gives the page number within
+ // the file used to produce the image.
+ int32 page_number = 2;
+}
+
+// Response to an image annotation request.
+message AnnotateImageResponse {
+ // If present, face detection has completed successfully.
+ repeated FaceAnnotation face_annotations = 1;
+
+ // If present, landmark detection has completed successfully.
+ repeated EntityAnnotation landmark_annotations = 2;
+
+ // If present, logo detection has completed successfully.
+ repeated EntityAnnotation logo_annotations = 3;
+
+ // If present, label detection has completed successfully.
+ repeated EntityAnnotation label_annotations = 4;
+
+ // If present, localized object detection has completed successfully.
+ // This will be sorted descending by confidence score.
+ repeated LocalizedObjectAnnotation localized_object_annotations = 22;
+
+ // If present, text (OCR) detection has completed successfully.
+ repeated EntityAnnotation text_annotations = 5;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ // This annotation provides the structural hierarchy for the OCR detected
+ // text.
+ TextAnnotation full_text_annotation = 12;
+
+ // If present, safe-search annotation has completed successfully.
+ SafeSearchAnnotation safe_search_annotation = 6;
+
+ // If present, image properties were extracted successfully.
+ ImageProperties image_properties_annotation = 8;
+
+ // If present, crop hints have completed successfully.
+ CropHintsAnnotation crop_hints_annotation = 11;
+
+ // If present, web detection has completed successfully.
+ WebDetection web_detection = 13;
+
+ // If present, product search has completed successfully.
+ google.cloud.vision.v1p3beta1.ProductSearchResults product_search_results =
+ 14;
+
+ // If set, represents the error message for the operation.
+ // Note that filled-in image annotations are guaranteed to be
+ // correct, even when `error` is set.
+ google.rpc.Status error = 9;
+
+ // If present, contextual information is needed to understand where this image
+ // comes from.
+ ImageAnnotationContext context = 21;
+}
+
+// Response to a single file annotation request. A file may contain one or more
+// images, which individually have their own responses.
+message AnnotateFileResponse {
+ // Information about the file for which this response is generated.
+ InputConfig input_config = 1;
+
+ // Individual responses to images found within the file.
+ repeated AnnotateImageResponse responses = 2;
+}
+
+// Multiple image annotation requests are batched into a single service call.
+message BatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+}
+
+// Response to a batch image annotation request.
+message BatchAnnotateImagesResponse {
+ // Individual responses to image annotation requests within the batch.
+ repeated AnnotateImageResponse responses = 1;
+}
+
+// An offline file annotation request.
+message AsyncAnnotateFileRequest {
+ // Required. Information about the input file.
+ InputConfig input_config = 1;
+
+ // Required. Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image(s) in the file.
+ ImageContext image_context = 3;
+
+ // Required. The desired output location and metadata (e.g. format).
+ OutputConfig output_config = 4;
+}
+
+// The response for a single offline file annotation request.
+message AsyncAnnotateFileResponse {
+ // The output location and metadata from AsyncAnnotateFileRequest.
+ OutputConfig output_config = 1;
+}
+
+// Multiple async file annotation requests are batched into a single service
+// call.
+message AsyncBatchAnnotateFilesRequest {
+ // Individual async file annotation requests for this batch.
+ repeated AsyncAnnotateFileRequest requests = 1;
+}
+
+// Response to an async batch file annotation request.
+message AsyncBatchAnnotateFilesResponse {
+ // The list of file annotation responses, one for each request in
+ // AsyncBatchAnnotateFilesRequest.
+ repeated AsyncAnnotateFileResponse responses = 1;
+}
+
+// The desired input location and metadata.
+message InputConfig {
+ // The Google Cloud Storage location to read the input from.
+ GcsSource gcs_source = 1;
+
+ // The type of the file. Currently only "application/pdf" and "image/tiff"
+ // are supported. Wildcards are not supported.
+ string mime_type = 2;
+}
+
+// The desired output location and metadata.
+message OutputConfig {
+ // The Google Cloud Storage location to write the output(s) to.
+ GcsDestination gcs_destination = 1;
+
+ // The max number of response protos to put into each output JSON file on
+ // Google Cloud Storage.
+ // The valid range is [1, 100]. If not specified, the default value is 20.
+ //
+ // For example, for one pdf file with 100 pages, 100 response protos will
+ // be generated. If `batch_size` = 20, then 5 json files each
+ // containing 20 response protos will be written under the prefix
+ // `gcs_destination`.`uri`.
+ //
+ // Currently, batch_size only applies to GcsDestination, with potential future
+ // support for other output configurations.
+ int32 batch_size = 2;
+}
+
+// The Google Cloud Storage location where the input will be read from.
+message GcsSource {
+ // Google Cloud Storage URI for the input file. This must only be a
+ // Google Cloud Storage object. Wildcards are not currently supported.
+ string uri = 1;
+}
+
+// The Google Cloud Storage location where the output will be written to.
+message GcsDestination {
+ // Google Cloud Storage URI where the results will be stored. Results will
+ // be in JSON format and preceded by its corresponding input URI. This field
+ // can either represent a single file, or a prefix for multiple outputs.
+ // Prefixes must end in a `/`.
+ //
+ // Examples:
+ //
+ // * File: gs://bucket-name/filename.json
+ // * Prefix: gs://bucket-name/prefix/here/
+ // * File: gs://bucket-name/prefix/here
+ //
+ // If multiple outputs, each response is still AnnotateFileResponse, each of
+ // which contains some subset of the full list of AnnotateImageResponse.
+ // Multiple outputs can happen if, for example, the output JSON is too large
+ // and overflows into multiple sharded files.
+ string uri = 1;
+}
+
+// A bucketized representation of likelihood, which is intended to give clients
+// highly stable results across model upgrades.
+enum Likelihood {
+ // Unknown likelihood.
+ UNKNOWN = 0;
+
+ // It is very unlikely that the image belongs to the specified vertical.
+ VERY_UNLIKELY = 1;
+
+ // It is unlikely that the image belongs to the specified vertical.
+ UNLIKELY = 2;
+
+ // It is possible that the image belongs to the specified vertical.
+ POSSIBLE = 3;
+
+ // It is likely that the image belongs to the specified vertical.
+ LIKELY = 4;
+
+ // It is very likely that the image belongs to the specified vertical.
+ VERY_LIKELY = 5;
+}
+
+// Contains metadata for the BatchAnnotateImages operation.
+message OperationMetadata {
+ // Batch operation states.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is received.
+ CREATED = 1;
+
+ // Request is actively being processed.
+ RUNNING = 2;
+
+ // The batch processing is done.
+ DONE = 3;
+
+ // The batch processing was cancelled.
+ CANCELLED = 4;
+ }
+ // Current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was received.
+ google.protobuf.Timestamp create_time = 5;
+
+ // The time when the operation result was last updated.
+ google.protobuf.Timestamp update_time = 6;
+}
diff --git a/google/cloud/vision/v1p3beta1/product_search.proto b/google/cloud/vision/v1p3beta1/product_search.proto
new file mode 100644
index 000000000..6a30c0875
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/product_search.proto
@@ -0,0 +1,162 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p3beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p3beta1/geometry.proto";
+import "google/cloud/vision/v1p3beta1/product_search_service.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchProto";
+option java_package = "com.google.cloud.vision.v1p3beta1";
+
+// Parameters for a product search request.
+message ProductSearchParams {
+ // The resource name of the catalog to search.
+ //
+ // Format is: `productSearch/catalogs/CATALOG_NAME`.
+ string catalog_name = 1;
+
+ // The category to search in.
+ // Optional. It is inferred by the system if it is not specified.
+ // [Deprecated] Use `product_category`.
+ ProductSearchCategory category = 2;
+
+ // The product category to search in.
+ // Optional. It is inferred by the system if it is not specified.
+ // Supported values are `bag`, `shoe`, `sunglasses`, `dress`, `outerwear`,
+ // `skirt`, `top`, `shorts`, and `pants`.
+ string product_category = 5;
+
+ // The bounding polygon around the area of interest in the image.
+ // Optional. If it is not specified, system discretion will be applied.
+ // [Deprecated] Use `bounding_poly`.
+ NormalizedBoundingPoly normalized_bounding_poly = 3;
+
+ // The bounding polygon around the area of interest in the image.
+ // Optional. If it is not specified, system discretion will be applied.
+ BoundingPoly bounding_poly = 9;
+
+ // Specifies the verbosity of the product search results.
+ // Optional. Defaults to `BASIC`.
+ ProductSearchResultsView view = 4;
+
+ // The resource name of a
+ // [ProductSet][google.cloud.vision.v1p3beta1.ProductSet] to be searched for
+ // similar images.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ string product_set = 6;
+
+ // The list of product categories to search in. Currently, we only consider
+ // the first category, and either "homegoods" or "apparel" should be
+ // specified.
+ repeated string product_categories = 7;
+
+ // The filtering expression. This can be used to restrict search results based
+ // on Product labels. We currently support an AND of OR of key-value
+ // expressions, where each expression within an OR must have the same key.
+ //
+ // For example, "(color = red OR color = blue) AND brand = Google" is
+ // acceptable, but not "(color = red OR brand = Google)" or "color: red".
+ string filter = 8;
+}
+
+// Results for a product search request.
+message ProductSearchResults {
+ // Information about a product.
+ message ProductInfo {
+ // Product ID.
+ string product_id = 1;
+
+ // The URI of the image which matched the query image.
+ //
+ // This field is returned only if `view` is set to `FULL` in
+ // the request.
+ string image_uri = 2;
+
+ // A confidence level on the match, ranging from 0 (no confidence) to
+ // 1 (full confidence).
+ //
+ // This field is returned only if `view` is set to `FULL` in
+ // the request.
+ float score = 3;
+ }
+
+ // Information about a product.
+ message Result {
+ // The Product.
+ Product product = 1;
+
+ // A confidence level on the match, ranging from 0 (no confidence) to
+ // 1 (full confidence).
+ //
+ // This field is returned only if `view` is set to `FULL` in
+ // the request.
+ float score = 2;
+
+ // The resource name of the image from the product that is the closest match
+ // to the query.
+ string image = 3;
+ }
+
+ // Product category.
+ // [Deprecated] Use `product_category`.
+ ProductSearchCategory category = 1;
+
+ // Product category.
+ // Supported values are `bag` and `shoe`.
+ // [Deprecated] `product_category` is provided in each Product.
+ string product_category = 4;
+
+ // Timestamp of the index which provided these results. Changes made after
+ // this time are not reflected in the current results.
+ google.protobuf.Timestamp index_time = 2;
+
+ // List of detected products.
+ repeated ProductInfo products = 3;
+
+ // List of results, one for each product match.
+ repeated Result results = 5;
+}
+
+// Supported product search categories.
+enum ProductSearchCategory {
+ // Default value used when a category is not specified.
+ PRODUCT_SEARCH_CATEGORY_UNSPECIFIED = 0;
+
+ // Shoes category.
+ SHOES = 1;
+
+ // Bags category.
+ BAGS = 2;
+}
+
+// Specifies the fields to include in product search results.
+enum ProductSearchResultsView {
+ // Product search results contain only `product_category` and `product_id`.
+ // Default value.
+ BASIC = 0;
+
+ // Product search results contain `product_category`, `product_id`,
+ // `image_uri`, and `score`.
+ FULL = 1;
+}
diff --git a/google/cloud/vision/v1p3beta1/product_search_service.proto b/google/cloud/vision/v1p3beta1/product_search_service.proto
new file mode 100644
index 000000000..23919ee7e
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/product_search_service.proto
@@ -0,0 +1,842 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p3beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p3beta1/geometry.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchServiceProto";
+option java_package = "com.google.cloud.vision.v1p3beta1";
+
+// Manages Products and ProductSets of reference images for use in product
+// search. It uses the following resource model:
+//
+// - The API has a collection of
+// [ProductSet][google.cloud.vision.v1p3beta1.ProductSet] resources, named
+// `projects/*/locations/*/productSets/*`, which acts as a way to put different
+// products into groups to limit identification.
+//
+// In parallel,
+//
+// - The API has a collection of
+// [Product][google.cloud.vision.v1p3beta1.Product] resources, named
+// `projects/*/locations/*/products/*`
+//
+// - Each [Product][google.cloud.vision.v1p3beta1.Product] has a collection of
+// [ReferenceImage][google.cloud.vision.v1p3beta1.ReferenceImage] resources,
+// named
+// `projects/*/locations/*/products/*/referenceImages/*`
+service ProductSearch {
+ // Creates and returns a new ProductSet resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing, or is longer than
+ // 4096 characters.
+ rpc CreateProductSet(CreateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/{parent=projects/*/locations/*}/productSets"
+ body: "product_set"
+ };
+ }
+
+ // Lists ProductSets in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100, or less
+ // than 1.
+ rpc ListProductSets(ListProductSetsRequest)
+ returns (ListProductSetsResponse) {
+ option (google.api.http) = {
+ get: "/v1p3beta1/{parent=projects/*/locations/*}/productSets"
+ };
+ }
+
+ // Gets information associated with a ProductSet.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc GetProductSet(GetProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ get: "/v1p3beta1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Makes changes to a ProductSet resource.
+ // Only display_name can be updated currently.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but
+ // missing from the request or longer than 4096 characters.
+ rpc UpdateProductSet(UpdateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ patch: "/v1p3beta1/{product_set.name=projects/*/locations/*/productSets/*}"
+ body: "product_set"
+ };
+ }
+
+ // Permanently deletes a ProductSet. All Products and ReferenceImages in the
+ // ProductSet will be deleted.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc DeleteProductSet(DeleteProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p3beta1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Creates and returns a new product resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if description is longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is missing or invalid.
+ rpc CreateProduct(CreateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/{parent=projects/*/locations/*}/products"
+ body: "product"
+ };
+ }
+
+ // Lists products in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProducts(ListProductsRequest) returns (ListProductsResponse) {
+ option (google.api.http) = {
+ get: "/v1p3beta1/{parent=projects/*/locations/*}/products"
+ };
+ }
+
+ // Gets information associated with a Product.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ rpc GetProduct(GetProductRequest) returns (Product) {
+ option (google.api.http) = {
+ get: "/v1p3beta1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Makes changes to a Product resource.
+ // Only display_name, description and labels can be updated right now.
+ //
+ // If labels are updated, the change will not be reflected in queries until
+ // the next index time.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but is
+ // missing from the request or longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if description is present in update_mask but is
+ // longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is present in update_mask.
+ rpc UpdateProduct(UpdateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ patch: "/v1p3beta1/{product.name=projects/*/locations/*/products/*}"
+ body: "product"
+ };
+ }
+
+ // Permanently deletes a product and its reference images.
+ //
+ // Metadata of the product and all its images will be deleted right away, but
+ // search queries against ProductSets containing the product may still work
+ // until all related caches are refreshed.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the product does not exist.
+ rpc DeleteProduct(DeleteProductRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p3beta1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Creates and returns a new ReferenceImage resource.
+ //
+ // The `bounding_poly` field is optional. If `bounding_poly` is not specified,
+ // the system will try to detect regions of interest in the image that are
+ // compatible with the product_category on the parent product. If it is
+ // specified, detection is ALWAYS skipped. The system converts polygons into
+ // non-rotated rectangles.
+ //
+ // Note that the pipeline will resize the image if the image resolution is too
+ // large to process (above 50MP).
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if the image_uri is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if the product does not exist.
+ // * Returns INVALID_ARGUMENT if bounding_poly is not provided, and nothing
+ // compatible with the parent product's product_category is detected.
+ // * Returns INVALID_ARGUMENT if bounding_poly contains more than 10 polygons.
+ rpc CreateReferenceImage(CreateReferenceImageRequest)
+ returns (ReferenceImage) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ body: "reference_image"
+ };
+ }
+
+ // Permanently deletes a reference image.
+ //
+ // The image metadata will be deleted right away, but search queries
+ // against ProductSets containing the image may still work until all related
+ // caches are refreshed.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the reference image does not exist.
+ rpc DeleteReferenceImage(DeleteReferenceImageRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p3beta1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Lists reference images.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the parent product does not exist.
+ // * Returns INVALID_ARGUMENT if the page_size is greater than 100, or less
+ // than 1.
+ rpc ListReferenceImages(ListReferenceImagesRequest)
+ returns (ListReferenceImagesResponse) {
+ option (google.api.http) = {
+ get: "/v1p3beta1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ };
+ }
+
+ // Gets information associated with a ReferenceImage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the specified image does not exist.
+ rpc GetReferenceImage(GetReferenceImageRequest) returns (ReferenceImage) {
+ option (google.api.http) = {
+ get: "/v1p3beta1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Adds a Product to the specified ProductSet. If the Product is already
+ // present, no change is made.
+ //
+ // One Product can be added to at most 100 ProductSets.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product or the ProductSet doesn't exist.
+ rpc AddProductToProductSet(AddProductToProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/{name=projects/*/locations/*/productSets/*}:addProduct"
+ body: "*"
+ };
+ }
+
+ // Removes a Product from the specified ProductSet.
+ //
+ // Possible errors:
+ //
+  // * Returns NOT_FOUND if the Product is not found under the ProductSet.
+ rpc RemoveProductFromProductSet(RemoveProductFromProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/{name=projects/*/locations/*/productSets/*}:removeProduct"
+ body: "*"
+ };
+ }
+
+ // Lists the Products in a ProductSet, in an unspecified order. If the
+ // ProductSet does not exist, the products field of the response will be
+ // empty.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProductsInProductSet(ListProductsInProductSetRequest)
+ returns (ListProductsInProductSetResponse) {
+ option (google.api.http) = {
+ get: "/v1p3beta1/{name=projects/*/locations/*/productSets/*}/products"
+ };
+ }
+
+ // Asynchronous API that imports a list of reference images to specified
+ // product sets based on a list of image information.
+ //
+ // The [google.longrunning.Operation][google.longrunning.Operation] API can be
+ // used to keep track of the progress and results of the request.
+ // `Operation.metadata` contains `BatchOperationMetadata`. (progress)
+ // `Operation.response` contains `ImportProductSetsResponse`. (results)
+ //
+ // The input source of this method is a csv file on Google Cloud Storage.
+ // For the format of the csv file please see
+ // [ImportProductSetsGcsSource.csv_file_uri][google.cloud.vision.v1p3beta1.ImportProductSetsGcsSource.csv_file_uri].
+ rpc ImportProductSets(ImportProductSetsRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p3beta1/{parent=projects/*/locations/*}/productSets:import"
+ body: "*"
+ };
+ }
+}
+
+// A Product contains ReferenceImages.
+message Product {
+ // A product label represented as a key-value pair.
+ message KeyValue {
+ // The key of the label attached to the product. Cannot be empty and cannot
+ // exceed 128 bytes.
+ string key = 1;
+
+ // The value of the label attached to the product. Cannot be empty and
+ // cannot exceed 128 bytes.
+ string value = 2;
+ }
+
+ // The resource name of the product.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ //
+ // This field is ignored when creating a product.
+ string name = 1;
+
+ // The user-provided name for this Product. Must not be empty. Must be at most
+ // 4096 characters long.
+ string display_name = 2;
+
+ // User-provided metadata to be stored with this product. Must be at most 4096
+ // characters long.
+ string description = 3;
+
+ // The category for the product identified by the reference image. This should
+ // be either "homegoods" or "apparel".
+ //
+ // This field is immutable.
+ string product_category = 4;
+
+ // Key-value pairs that can be attached to a product. At query time,
+ // constraints can be specified based on the product_labels.
+ //
+ // Note that integer values can be provided as strings, e.g. "1199". Only
+ // strings with integer values can match a range-based restriction which is
+ // to be supported soon.
+ //
+ // Multiple values can be assigned to the same key. One product may have up to
+ // 100 product_labels.
+ repeated KeyValue product_labels = 5;
+}
+
+// A ProductSet contains Products. A ProductSet can contain a maximum of 1
+// million reference images. If the limit is exceeded, periodic indexing will
+// fail.
+message ProductSet {
+ // The resource name of the ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ //
+ // This field is ignored when creating a ProductSet.
+ string name = 1;
+
+ // The user-provided name for this ProductSet. Must not be empty. Must be at
+ // most 4096 characters long.
+ string display_name = 2;
+
+ // Output only. The time at which this ProductSet was last indexed. Query
+ // results will reflect all updates before this time. If this ProductSet has
+ // never been indexed, this field is 0.
+ //
+ // This field is ignored when creating a ProductSet.
+ google.protobuf.Timestamp index_time = 3;
+
+ // Output only. If there was an error with indexing the product set, the field
+ // is populated.
+ //
+ // This field is ignored when creating a ProductSet.
+ google.rpc.Status index_error = 4;
+}
+
+// A `ReferenceImage` represents a product image and its associated metadata,
+// such as bounding boxes.
+message ReferenceImage {
+ // The resource name of the reference image.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ //
+ // This field is ignored when creating a reference image.
+ string name = 1;
+
+ // The Google Cloud Storage URI of the reference image.
+ //
+ // The URI must start with `gs://`.
+ //
+ // Required.
+ string uri = 2;
+
+ // Bounding polygons around the areas of interest in the reference image.
+ // Optional. If this field is empty, the system will try to detect regions of
+ // interest. At most 10 bounding polygons will be used.
+ //
+ // The provided shape is converted into a non-rotated rectangle. Once
+ // converted, the small edge of the rectangle must be greater than or equal
+ // to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; 1:5
+ // is not).
+ repeated BoundingPoly bounding_polys = 3;
+}
+
+// Request message for the `CreateProduct` method.
+message CreateProductRequest {
+ // The project in which the Product should be created.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The product to create.
+ Product product = 2;
+
+ // A user-supplied resource id for this Product. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_id = 3;
+}
+
+// Request message for the `ListProducts` method.
+message ListProductsRequest {
+ // The project OR ProductSet from which Products should be listed.
+ //
+ // Format:
+ // `projects/PROJECT_ID/locations/LOC_ID`
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProducts` method.
+message ListProductsResponse {
+ // List of products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProduct` method.
+message GetProductRequest {
+ // Resource name of the Product to get.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProduct` method.
+message UpdateProductRequest {
+ // The Product resource which replaces the one on the server.
+ // product.name is immutable.
+ Product product = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields
+ // to update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask paths include `product_labels`, `display_name` and
+ // `description`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProduct` method.
+message DeleteProductRequest {
+ // Resource name of product to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateProductSet` method.
+message CreateProductSetRequest {
+ // The project in which the ProductSet should be created.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The ProductSet to create.
+ ProductSet product_set = 2;
+
+ // A user-supplied resource id for this ProductSet. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_set_id = 3;
+}
+
+// Request message for the `ListProductSets` method.
+message ListProductSetsRequest {
+ // The project from which ProductSets should be listed.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductSets` method.
+message ListProductSetsResponse {
+ // List of ProductSets.
+ repeated ProductSet product_sets = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProductSet` method.
+message GetProductSetRequest {
+ // Resource name of the ProductSet to get.
+ //
+ // Format is:
+  // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProductSet` method.
+message UpdateProductSetRequest {
+ // The ProductSet resource which replaces the one on the server.
+ ProductSet product_set = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
+ // update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask path is `display_name`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProductSet` method.
+message DeleteProductSetRequest {
+ // Resource name of the ProductSet to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateReferenceImage` method.
+message CreateReferenceImageRequest {
+ // Resource name of the product in which to create the reference image.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The reference image to create.
+ // If an image ID is specified, it is ignored.
+ ReferenceImage reference_image = 2;
+
+ // A user-supplied resource id for the ReferenceImage to be added. If set,
+ // the server will attempt to use this value as the resource id. If it is
+ // already in use, an error is returned with code ALREADY_EXISTS. Must be at
+ // most 128 characters long. It cannot contain the character `/`.
+ string reference_image_id = 3;
+}
+
+// Request message for the `ListReferenceImages` method.
+message ListReferenceImagesRequest {
+ // Resource name of the product containing the reference images.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // A token identifying a page of results to be returned. This is the value
+ // of `nextPageToken` returned in a previous reference image list request.
+ //
+ // Defaults to the first page if not specified.
+ string page_token = 3;
+}
+
+// Response message for the `ListReferenceImages` method.
+message ListReferenceImagesResponse {
+ // The list of reference images.
+ repeated ReferenceImage reference_images = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string next_page_token = 3;
+}
+
+// Request message for the `GetReferenceImage` method.
+message GetReferenceImageRequest {
+ // The resource name of the ReferenceImage to get.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ string name = 1;
+}
+
+// Request message for the `DeleteReferenceImage` method.
+message DeleteReferenceImageRequest {
+ // The resource name of the reference image to delete.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`
+ string name = 1;
+}
+
+// Request message for the `AddProductToProductSet` method.
+message AddProductToProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be added to this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `RemoveProductFromProductSet` method.
+message RemoveProductFromProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be removed from this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetRequest {
+ // The ProductSet resource for which to retrieve Products.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetResponse {
+ // The list of Products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// The Google Cloud Storage location for a csv file which preserves a list of
+// ImportProductSetRequests in each line.
+message ImportProductSetsGcsSource {
+ // The Google Cloud Storage URI of the input csv file.
+ //
+  // The URI must start with `gs://`.
+ //
+ // The format of the input csv file should be one image per line.
+  // In each line, there are 8 columns.
+  // 1. image_uri
+  // 2. image_id
+  // 3. product_set_id
+  // 4. product_id
+  // 5. product_category
+  // 6. product_display_name
+  // 7. labels
+  // 8. bounding_poly
+ //
+ // Columns 1, 3, 4, and 5 are required, other columns are optional. A new
+ // ProductSet/Product with the same id will be created on the fly
+ // if the ProductSet/Product specified by product_set_id/product_id does not
+ // exist.
+ //
+ // The image_id field is optional but has to be unique if provided. If it is
+  // empty, we will automatically assign a unique id to the image.
+ //
+ // The product_display_name field is optional. If it is empty, a space (" ")
+ // is used as the place holder for the product display_name, which can
+ // be updated later through the realtime API.
+ //
+ // If the Product with product_id already exists, the fields
+ // product_display_name, product_category and labels are ignored.
+ //
+ // If a Product doesn't exist and needs to be created on the fly, the
+ // product_display_name field refers to
+ // [Product.display_name][google.cloud.vision.v1p3beta1.Product.display_name],
+ // the product_category field refers to
+ // [Product.product_category][google.cloud.vision.v1p3beta1.Product.product_category],
+ // and the labels field refers to [Product.labels][].
+ //
+ // Labels (optional) should be a line containing a list of comma-separated
+ // key-value pairs, with the format
+ // "key_1=value_1,key_2=value_2,...,key_n=value_n".
+ //
+ // The bounding_poly (optional) field is used to identify one region of
+ // interest from the image in the same manner as CreateReferenceImage. If no
+ // bounding_poly is specified, the system will try to detect regions of
+ // interest automatically.
+ //
+ // Note that the pipeline will resize the image if the image resolution is too
+ // large to process (above 20MP).
+ //
+ // Also note that at most one bounding_poly is allowed per line. If the image
+ // contains multiple regions of interest, the csv should contain one line per
+ // region of interest.
+ //
+ // The bounding_poly column should contain an even number of comma-separated
+ // numbers, with the format "p1_x,p1_y,p2_x,p2_y,...,pn_x,pn_y". Nonnegative
+ // integers should be used for absolute bounding polygons, and float values
+ // in [0, 1] should be used for normalized bounding polygons.
+ string csv_file_uri = 1;
+}
+
+// The input content for the `ImportProductSets` method.
+message ImportProductSetsInputConfig {
+ // The source of the input.
+ oneof source {
+ // The Google Cloud Storage location for a csv file which preserves a list
+ // of ImportProductSetRequests in each line.
+ ImportProductSetsGcsSource gcs_source = 1;
+ }
+}
+
+// Request message for the `ImportProductSets` method.
+message ImportProductSetsRequest {
+ // The project in which the ProductSets should be imported.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The input content for the list of requests.
+ ImportProductSetsInputConfig input_config = 2;
+}
+
+// Response message for the `ImportProductSets` method.
+//
+// This message is returned by the
+// [google.longrunning.Operations.GetOperation][google.longrunning.Operations.GetOperation]
+// method in the returned
+// [google.longrunning.Operation.response][google.longrunning.Operation.response]
+// field.
+message ImportProductSetsResponse {
+ // The list of reference_images that are imported successfully.
+ repeated ReferenceImage reference_images = 1;
+
+ // The rpc status for each ImportProductSet request, including both successes
+ // and errors.
+ //
+ // The number of statuses here matches the number of lines in the csv file,
+ // and statuses[i] stores the success or failure status of processing the i-th
+ // line of the csv, starting from line 0.
+ repeated google.rpc.Status statuses = 2;
+}
+
+// Metadata for the batch operations such as the current state.
+//
+// This is included in the `metadata` field of the `Operation` returned by the
+// `GetOperation` call of the `google::longrunning::Operations` service.
+message BatchOperationMetadata {
+ // Enumerates the possible states that the batch request can be in.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is actively being processed.
+ PROCESSING = 1;
+
+ // The request is done and at least one item has been successfully
+ // processed.
+ SUCCESSFUL = 2;
+
+ // The request is done and no item has been successfully processed.
+ FAILED = 3;
+
+ // The request is done after the longrunning.Operations.CancelOperation has
+ // been called by the user. Any records that were processed before the
+ // cancel command are output as specified in the request.
+ CANCELLED = 4;
+ }
+
+ // The current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was submitted to the server.
+ google.protobuf.Timestamp submit_time = 2;
+
+ // The time when the batch request is finished and
+ // [google.longrunning.Operation.done][google.longrunning.Operation.done] is
+ // set to true.
+ google.protobuf.Timestamp end_time = 3;
+}
diff --git a/google/cloud/vision/v1p3beta1/text_annotation.proto b/google/cloud/vision/v1p3beta1/text_annotation.proto
new file mode 100644
index 000000000..3c256c577
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/text_annotation.proto
@@ -0,0 +1,259 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p3beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p3beta1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1p3beta1";
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have their own
+// properties. Properties describe detected languages, breaks, etc. Please refer
+// to the
+// [TextAnnotation.TextProperty][google.cloud.vision.v1p3beta1.TextAnnotation.TextProperty]
+// message definition below for more detail.
+message TextAnnotation {
+ // Detected language for a structural component.
+ message DetectedLanguage {
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 1;
+
+ // Confidence of detected language. Range [0, 1].
+ float confidence = 2;
+ }
+
+ // Detected start or end of a structural component.
+ message DetectedBreak {
+ // Enum to denote the type of break found. New line, space etc.
+ enum BreakType {
+ // Unknown break label type.
+ UNKNOWN = 0;
+
+ // Regular space.
+ SPACE = 1;
+
+ // Sure space (very wide).
+ SURE_SPACE = 2;
+
+ // Line-wrapping break.
+ EOL_SURE_SPACE = 3;
+
+ // End-line hyphen that is not present in text; does not co-occur with
+ // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ HYPHEN = 4;
+
+ // Line break that ends a paragraph.
+ LINE_BREAK = 5;
+ }
+
+ // Detected break type.
+ BreakType type = 1;
+
+ // True if break prepends the element.
+ bool is_prefix = 2;
+ }
+
+ // Additional information detected on the structural component.
+ message TextProperty {
+ // A list of detected languages together with confidence.
+ repeated DetectedLanguage detected_languages = 1;
+
+ // Detected start or end of a text segment.
+ DetectedBreak detected_break = 2;
+ }
+
+ // List of pages detected by OCR.
+ repeated Page pages = 1;
+
+ // UTF-8 text detected on the pages.
+ string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+ // Additional information detected on the page.
+ TextAnnotation.TextProperty property = 1;
+
+ // Page width. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 width = 2;
+
+ // Page height. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 height = 3;
+
+ // List of blocks of text, images etc on this page.
+ repeated Block blocks = 4;
+
+ // Confidence of the OCR results on the page. Range [0, 1].
+ float confidence = 5;
+}
+
+// Logical element on the page.
+message Block {
+ // Type of a block (text, image etc) as identified by OCR.
+ enum BlockType {
+ // Unknown block type.
+ UNKNOWN = 0;
+
+ // Regular text block.
+ TEXT = 1;
+
+ // Table block.
+ TABLE = 2;
+
+ // Image block.
+ PICTURE = 3;
+
+ // Horizontal/vertical line box.
+ RULER = 4;
+
+ // Barcode block.
+ BARCODE = 5;
+ }
+
+ // Additional information detected for the block.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the block.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ //
+ // * when the text is horizontal it might look like:
+ //
+ // 0----1
+ // | |
+ // 3----2
+ //
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ //
+ // 2----3
+ // | |
+ // 1----0
+ //
+  //      and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+  // List of paragraphs in this block (if this block is of type text).
+ repeated Paragraph paragraphs = 3;
+
+ // Detected block type (text, image etc) for this block.
+ BlockType block_type = 4;
+
+ // Confidence of the OCR results on the block. Range [0, 1].
+ float confidence = 5;
+}
+
+// Structural unit of text representing a number of words in certain order.
+message Paragraph {
+ // Additional information detected for the paragraph.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the paragraph.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of words in this paragraph.
+ repeated Word words = 3;
+
+ // Confidence of the OCR results for the paragraph. Range [0, 1].
+ float confidence = 4;
+}
+
+// A word representation.
+message Word {
+ // Additional information detected for the word.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the word.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of symbols in the word.
+ // The order of the symbols follows the natural reading order.
+ repeated Symbol symbols = 3;
+
+ // Confidence of the OCR results for the word. Range [0, 1].
+ float confidence = 4;
+}
+
+// A single symbol representation.
+message Symbol {
+ // Additional information detected for the symbol.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the symbol.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // The actual UTF-8 representation of the symbol.
+ string text = 3;
+
+ // Confidence of the OCR results for the symbol. Range [0, 1].
+ float confidence = 4;
+}
diff --git a/google/cloud/vision/v1p3beta1/vision_gapic.yaml b/google/cloud/vision/v1p3beta1/vision_gapic.yaml
new file mode 100644
index 000000000..59c695328
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/vision_gapic.yaml
@@ -0,0 +1,444 @@
+type: com.google.api.codegen.ConfigProto
+config_schema_version: 1.0.0
+language_settings:
+ java:
+ package_name: com.google.cloud.vision.v1p3beta1
+ python:
+ package_name: google.cloud.vision_v1p3beta1.gapic
+ go:
+ package_name: cloud.google.com/go/vision/apiv1p3beta1
+ domain_layer_location: cloud.google.com/go/vision
+ csharp:
+ package_name: Google.Cloud.Vision.V1p3beta1
+ ruby:
+ package_name: Google::Cloud::Vision::V1p3beta1
+ php:
+ package_name: Google\Cloud\Vision\V1p3beta1
+ nodejs:
+ package_name: vision.v1p3beta1
+ domain_layer_location: google-cloud
+interfaces:
+- name: google.cloud.vision.v1p3beta1.ProductSearch
+ collections:
+ - name_pattern: projects/{project}/locations/{location}
+ entity_name: location
+ - name_pattern: projects/{project}/locations/{location}/productSets/{product_set}
+ entity_name: product_set
+ - name_pattern: projects/{project}/locations/{location}/products/{product}
+ entity_name: product
+ - name_pattern: projects/{project}/locations/{location}/products/{product}/referenceImages/{reference_image}
+ entity_name: reference_image
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes: []
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: CreateProductSet
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - product_set
+ - product_set_id
+ required_fields:
+ - parent
+ - product_set
+ - product_set_id
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: ListProductSets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: product_sets
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: GetProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: UpdateProductSet
+ flattening:
+ groups:
+ - parameters:
+ - product_set
+ - update_mask
+ required_fields:
+ - product_set
+ - update_mask
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ product_set.name: product_set
+ timeout_millis: 60000
+ - name: DeleteProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: CreateProduct
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - product
+ - product_id
+ required_fields:
+ - parent
+ - product
+ - product_id
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: ListProducts
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: products
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: GetProduct
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product
+ timeout_millis: 60000
+ - name: UpdateProduct
+ flattening:
+ groups:
+ - parameters:
+ - product
+ - update_mask
+ required_fields:
+ - product
+ - update_mask
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ product.name: product
+ timeout_millis: 60000
+ - name: DeleteProduct
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product
+ timeout_millis: 60000
+ - name: CreateReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - reference_image
+ - reference_image_id
+ required_fields:
+ - parent
+ - reference_image
+ - reference_image_id
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: product
+ timeout_millis: 60000
+ - name: DeleteReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: reference_image
+ timeout_millis: 60000
+ - name: ListReferenceImages
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: reference_images
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: product
+ timeout_millis: 60000
+ - name: GetReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: reference_image
+ timeout_millis: 60000
+ - name: AddProductToProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - product
+ required_fields:
+ - name
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: RemoveProductFromProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - product
+ required_fields:
+ - name
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: ListProductsInProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: products
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: ImportProductSets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - input_config
+ required_fields:
+ - parent
+ - input_config
+ long_running:
+ return_type: google.cloud.vision.v1p3beta1.ImportProductSetsResponse
+ metadata_type: google.cloud.vision.v1p3beta1.BatchOperationMetadata
+ initial_poll_delay_millis: 20000
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 45000
+ total_poll_timeout_millis: 86400000
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+- name: google.cloud.vision.v1p3beta1.ImageAnnotator
+ smoke_test:
+ method: BatchAnnotateImages
+ init_fields:
+ - requests[0].image.source.gcs_image_uri="gs://gapic-toolkit/President_Barack_Obama.jpg"
+ - requests[0].features[0].type=FACE_DETECTION
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes: []
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: BatchAnnotateImages
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: AsyncBatchAnnotateFiles
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ long_running:
+ return_type: google.cloud.vision.v1p3beta1.AsyncBatchAnnotateFilesResponse
+ metadata_type: google.cloud.vision.v1p3beta1.OperationMetadata
+ initial_poll_delay_millis: 20000
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 45000
+ total_poll_timeout_millis: 86400000
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+resource_name_generation:
+- message_name: CreateProductSetRequest
+ field_entity_map:
+ parent: location
+- message_name: ListProductSetsRequest
+ field_entity_map:
+ parent: location
+- message_name: GetProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: UpdateProductSetRequest
+ field_entity_map:
+ product_set.name: product_set
+- message_name: DeleteProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: CreateProductRequest
+ field_entity_map:
+ parent: location
+- message_name: ListProductsRequest
+ field_entity_map:
+ parent: location
+- message_name: GetProductRequest
+ field_entity_map:
+ name: product
+- message_name: UpdateProductRequest
+ field_entity_map:
+ product.name: product
+- message_name: DeleteProductRequest
+ field_entity_map:
+ name: product
+- message_name: CreateReferenceImageRequest
+ field_entity_map:
+ parent: product
+- message_name: DeleteReferenceImageRequest
+ field_entity_map:
+ name: reference_image
+- message_name: ListReferenceImagesRequest
+ field_entity_map:
+ parent: product
+- message_name: GetReferenceImageRequest
+ field_entity_map:
+ name: reference_image
+- message_name: AddProductToProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: RemoveProductFromProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: ListProductsInProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: ImportProductSetsRequest
+ field_entity_map:
+ parent: location
diff --git a/google/cloud/vision/v1p3beta1/web_detection.proto b/google/cloud/vision/v1p3beta1/web_detection.proto
new file mode 100644
index 000000000..cf9a22612
--- /dev/null
+++ b/google/cloud/vision/v1p3beta1/web_detection.proto
@@ -0,0 +1,104 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p3beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1p3beta1";
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+ // Entity deduced from similar images on the Internet.
+ message WebEntity {
+ // Opaque entity ID.
+ string entity_id = 1;
+
+ // Overall relevancy score for the entity.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+
+ // Canonical description of the entity, in English.
+ string description = 3;
+ }
+
+ // Metadata for online images.
+ message WebImage {
+ // The result image URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the image.
+ float score = 2;
+ }
+
+ // Metadata for web pages.
+ message WebPage {
+ // The result web page URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the web page.
+ float score = 2;
+
+ // Title for the web page, may contain HTML markups.
+ string page_title = 3;
+
+ // Fully matching images on the page.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 4;
+
+ // Partial matching images on the page.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its
+ // crops.
+ repeated WebImage partial_matching_images = 5;
+ }
+
+ // Label to provide extra metadata for the web detection.
+ message WebLabel {
+ // Label for extra metadata.
+ string label = 1;
+
+ // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ // For more information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+ }
+
+ // Deduced entities from similar images on the Internet.
+ repeated WebEntity web_entities = 1;
+
+ // Fully matching images from the Internet.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 2;
+
+ // Partial matching images from the Internet.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its crops.
+ repeated WebImage partial_matching_images = 3;
+
+ // Web pages containing the matching images from the Internet.
+ repeated WebPage pages_with_matching_images = 4;
+
+ // The visually similar image results.
+ repeated WebImage visually_similar_images = 6;
+
+ // Best guess text labels for the request image.
+ repeated WebLabel best_guess_labels = 8;
+}
diff --git a/google/cloud/vision/v1p4beta1/BUILD.bazel b/google/cloud/vision/v1p4beta1/BUILD.bazel
new file mode 100644
index 000000000..de02672b8
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/BUILD.bazel
@@ -0,0 +1,158 @@
+# This is an API workspace, having public visibility by default makes perfect sense.
+package(default_visibility = ["//visibility:public"])
+
+##############################################################################
+# Common
+##############################################################################
+load("@com_google_api_codegen//rules_gapic:gapic.bzl", "proto_library_with_info")
+load(
+ "@com_google_api_codegen//rules_gapic/java:java_gapic.bzl",
+ "java_gapic_library",
+ "java_resource_name_proto_library",
+)
+
+proto_library(
+ name = "vision_proto",
+ srcs = [
+ "geometry.proto",
+ "image_annotator.proto",
+ "product_search.proto",
+ "product_search_service.proto",
+ "text_annotation.proto",
+ "web_detection.proto",
+ ],
+ deps = [
+ "//google/api:annotations_proto",
+ "//google/longrunning:operations_proto",
+ "//google/rpc:status_proto",
+ "//google/type:color_proto",
+ "//google/type:latlng_proto",
+ "@com_google_protobuf//:empty_proto",
+ "@com_google_protobuf//:field_mask_proto",
+ "@com_google_protobuf//:timestamp_proto",
+ ],
+)
+
+proto_library_with_info(
+ name = "vision_proto_with_info",
+ deps = [":vision_proto"],
+)
+
+##############################################################################
+# Java
+##############################################################################
+load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library")
+load("@com_google_api_codegen//rules_gapic/java:java_gapic_pkg.bzl", "java_gapic_assembly_gradle_pkg")
+
+_JAVA_GRPC_DEPS = [
+ "@com_google_api_grpc_proto_google_common_protos//jar",
+]
+
+java_proto_library(
+ name = "vision_java_proto",
+ deps = [":vision_proto"],
+)
+
+java_grpc_library(
+ name = "vision_java_grpc",
+ srcs = [":vision_proto"],
+ deps = [":vision_java_proto"] + _JAVA_GRPC_DEPS,
+)
+
+java_resource_name_proto_library(
+ name = "vision_resource_name_java_proto",
+ gapic_yaml = "vision_gapic.yaml",
+ deps = [":vision_proto"],
+)
+
+java_gapic_library(
+ name = "vision_java_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ service_yaml = "//google/cloud/vision:vision_v1p4beta1.yaml",
+ test_deps = [":vision_java_grpc"],
+ deps = [
+ ":vision_java_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+)
+
+[java_test(
+ name = test_name,
+ test_class = test_name,
+ runtime_deps = [
+ ":vision_java_gapic_test",
+ ],
+) for test_name in [
+ "com.google.cloud.vision.v1p4beta1.ImageAnnotatorClientTest",
+]]
+
+# Opensource Packages
+java_gapic_assembly_gradle_pkg(
+ name = "google-cloud-vision-v1p4beta1-java",
+ client_deps = [":vision_java_gapic"],
+ client_group = "com.google.cloud",
+ client_test_deps = [":vision_java_gapic_test"],
+ grpc_deps = [":vision_java_grpc"],
+ grpc_group = "com.google.api.grpc",
+ proto_deps = [
+ ":vision_java_proto",
+ ":vision_proto",
+ ":vision_resource_name_java_proto",
+ ] + _JAVA_GRPC_DEPS,
+ version = "0.0.0-SNAPSHOT",
+)
+
+##############################################################################
+# Go
+##############################################################################
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic.bzl", "go_gapic_srcjar", "go_gapic_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@com_google_api_codegen//rules_gapic/go:go_gapic_pkg.bzl", "go_gapic_assembly_pkg")
+
+go_proto_library(
+ name = "vision_go_proto",
+ compilers = ["@io_bazel_rules_go//proto:go_grpc"],
+ importpath = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1",
+ protos = [":vision_proto_with_info"],
+ deps = [
+ "//google/api:annotations_go_proto",
+ "//google/longrunning:longrunning_go_proto",
+ "//google/rpc:status_go_proto",
+ "//google/type:color_go_proto",
+ "//google/type:latlng_go_proto",
+ ],
+)
+
+go_gapic_library(
+ name = "vision_go_gapic",
+ src = ":vision_proto_with_info",
+ gapic_yaml = "vision_gapic.yaml",
+ importpath = "cloud.google.com/go/vision/apiv1p4beta1",
+ service_yaml = "//google/cloud/vision:vision_v1p4beta1.yaml",
+ deps = [
+ ":vision_go_proto",
+ "//google/longrunning:longrunning_go_gapic",
+ "//google/longrunning:longrunning_go_proto",
+ "@com_google_cloud_go//longrunning:go_default_library",
+ ],
+)
+
+go_test(
+ name = "vision_go_gapic_test",
+ srcs = [":vision_go_gapic_srcjar_test"],
+ embed = [":vision_go_gapic"],
+ importpath = "cloud.google.com/go/vision/apiv1p4beta1",
+)
+
+# Opensource Packages
+go_gapic_assembly_pkg(
+ name = "gapi-cloud-vision-v1p4beta1-go",
+ deps = [
+ ":vision_go_gapic",
+ ":vision_go_gapic_srcjar-smoke-test.srcjar",
+ ":vision_go_gapic_srcjar-test.srcjar",
+ ":vision_go_proto",
+ ],
+) \ No newline at end of file
diff --git a/google/cloud/vision/v1p4beta1/geometry.proto b/google/cloud/vision/v1p4beta1/geometry.proto
new file mode 100644
index 000000000..b0abd329c
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/geometry.proto
@@ -0,0 +1,71 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "GeometryProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
+
+// A bounding polygon for the detected image annotation.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+
+ // The bounding polygon normalized vertices.
+ repeated NormalizedVertex normalized_vertices = 2;
+}
+
+// A 3D position in the image, used primarily for Face detection landmarks.
+// A valid Position must have both x and y coordinates.
+// The position coordinates are in the same scale as the original image.
+message Position {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+
+ // Z coordinate (or depth).
+ float z = 3;
+}
diff --git a/google/cloud/vision/v1p4beta1/image_annotator.proto b/google/cloud/vision/v1p4beta1/image_annotator.proto
new file mode 100644
index 000000000..ebbf92c35
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/image_annotator.proto
@@ -0,0 +1,905 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+import "google/cloud/vision/v1p4beta1/product_search.proto";
+import "google/cloud/vision/v1p4beta1/text_annotation.proto";
+import "google/cloud/vision/v1p4beta1/web_detection.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/type/color.proto";
+import "google/type/latlng.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ImageAnnotatorProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+// Service that performs Google Cloud Vision API detection tasks over client
+// images, such as face, landmark, logo, label, and text detection. The
+// ImageAnnotator service returns detected entities from the images.
+service ImageAnnotator {
+ // Run image detection and annotation for a batch of images.
+ rpc BatchAnnotateImages(BatchAnnotateImagesRequest)
+ returns (BatchAnnotateImagesResponse) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/images:annotate"
+ body: "*"
+ };
+ }
+
+ // Service that performs image detection and annotation for a batch of files.
+ // Now only "application/pdf", "image/tiff" and "image/gif" are supported.
+ //
+ // This service will extract at most the first 10 frames (gif) or pages
+ // (pdf or tiff) from each file provided and perform detection and annotation
+ // for each image extracted.
+ rpc BatchAnnotateFiles(BatchAnnotateFilesRequest)
+ returns (BatchAnnotateFilesResponse) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/files:annotate"
+ body: "*"
+ };
+ }
+
+ // Run asynchronous image detection and annotation for a list of images.
+ //
+ // Progress and results can be retrieved through the
+ // `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `OperationMetadata` (metadata).
+ // `Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).
+ //
+ // This service will write image annotation outputs to json files in customer
+ // GCS bucket, each json file containing BatchAnnotateImagesResponse proto.
+ rpc AsyncBatchAnnotateImages(AsyncBatchAnnotateImagesRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/images:asyncBatchAnnotate"
+ body: "*"
+ };
+ }
+
+ // Run asynchronous image detection and annotation for a list of generic
+ // files, such as PDF files, which may contain multiple pages and multiple
+ // images per page. Progress and results can be retrieved through the
+ // `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `OperationMetadata` (metadata).
+ // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
+ rpc AsyncBatchAnnotateFiles(AsyncBatchAnnotateFilesRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/files:asyncBatchAnnotate"
+ body: "*"
+ };
+ }
+}
+
+// The type of Google Cloud Vision API detection to perform, and the maximum
+// number of results to return for that type. Multiple `Feature` objects can
+// be specified in the `features` list.
+message Feature {
+ // Type of Google Cloud Vision API feature to be extracted.
+ enum Type {
+ // Unspecified feature type.
+ TYPE_UNSPECIFIED = 0;
+
+ // Run face detection.
+ FACE_DETECTION = 1;
+
+ // Run landmark detection.
+ LANDMARK_DETECTION = 2;
+
+ // Run logo detection.
+ LOGO_DETECTION = 3;
+
+ // Run label detection.
+ LABEL_DETECTION = 4;
+
+ // Run text detection / optical character recognition (OCR). Text detection
+ // is optimized for areas of text within a larger image; if the image is
+ // a document, use `DOCUMENT_TEXT_DETECTION` instead.
+ TEXT_DETECTION = 5;
+
+ // Run dense text document OCR. Takes precedence when both
+ // `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
+ DOCUMENT_TEXT_DETECTION = 11;
+
+ // Run Safe Search to detect potentially unsafe
+ // or undesirable content.
+ SAFE_SEARCH_DETECTION = 6;
+
+ // Compute a set of image properties, such as the
+ // image's dominant colors.
+ IMAGE_PROPERTIES = 7;
+
+ // Run crop hints.
+ CROP_HINTS = 9;
+
+ // Run web detection.
+ WEB_DETECTION = 10;
+
+ // Run Product Search.
+ PRODUCT_SEARCH = 12;
+
+ // Run localizer for object detection.
+ OBJECT_LOCALIZATION = 19;
+ }
+
+ // The feature type.
+ Type type = 1;
+
+ // Maximum number of results of this type. Does not apply to
+ // `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ int32 max_results = 2;
+
+ // Model to use for the feature.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 3;
+}
+
+// External image source (Google Cloud Storage or web URL image location).
+message ImageSource {
+ // **Use `image_uri` instead.**
+ //
+ // The Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
+ string gcs_image_uri = 1;
+
+ // The URI of the source image. Can be either:
+ //
+ // 1. A Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more
+ // info.
+ //
+ // 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
+ // HTTP/HTTPS URLs, Google cannot guarantee that the request will be
+ // completed. Your request may fail if the specified host denies the
+ // request (e.g. due to request throttling or DOS prevention), or if Google
+ // throttles requests to the site for abuse prevention. You should not
+ // depend on externally-hosted images for production applications.
+ //
+ // When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ // precedence.
+ string image_uri = 2;
+}
+
+// Client image to perform Google Cloud Vision API tasks over.
+message Image {
+ // Image content, represented as a stream of bytes.
+ // Note: As with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // Google Cloud Storage image location, or publicly-accessible image
+ // URL. If both `content` and `source` are provided for an image, `content`
+ // takes precedence and is used to perform the image annotation request.
+ ImageSource source = 2;
+}
+
+// A bucketized representation of likelihood, which is intended to give clients
+// highly stable results across model upgrades.
+enum Likelihood {
+ // Unknown likelihood.
+ UNKNOWN = 0;
+
+ // It is very unlikely that the image belongs to the specified vertical.
+ VERY_UNLIKELY = 1;
+
+ // It is unlikely that the image belongs to the specified vertical.
+ UNLIKELY = 2;
+
+ // It is possible that the image belongs to the specified vertical.
+ POSSIBLE = 3;
+
+ // It is likely that the image belongs to the specified vertical.
+ LIKELY = 4;
+
+ // It is very likely that the image belongs to the specified vertical.
+ VERY_LIKELY = 5;
+}
+
+// A face annotation object contains the results of face detection.
+message FaceAnnotation {
+ // A face-specific landmark (for example, a face feature).
+ message Landmark {
+ // Face landmark (feature) type.
+ // Left and right are defined from the vantage of the viewer of the image
+ // without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ // typically, is the person's right eye.
+ enum Type {
+ // Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0;
+
+ // Left eye.
+ LEFT_EYE = 1;
+
+ // Right eye.
+ RIGHT_EYE = 2;
+
+ // Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3;
+
+ // Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4;
+
+ // Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5;
+
+ // Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6;
+
+ // Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7;
+
+ // Nose tip.
+ NOSE_TIP = 8;
+
+ // Upper lip.
+ UPPER_LIP = 9;
+
+ // Lower lip.
+ LOWER_LIP = 10;
+
+ // Mouth left.
+ MOUTH_LEFT = 11;
+
+ // Mouth right.
+ MOUTH_RIGHT = 12;
+
+ // Mouth center.
+ MOUTH_CENTER = 13;
+
+ // Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14;
+
+ // Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15;
+
+ // Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16;
+
+ // Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17;
+
+ // Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18;
+
+ // Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19;
+
+ // Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20;
+
+ // Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21;
+
+ // Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22;
+
+ // Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23;
+
+ // Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24;
+
+ // Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25;
+
+ // Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26;
+
+ // Left ear tragion.
+ LEFT_EAR_TRAGION = 27;
+
+ // Right ear tragion.
+ RIGHT_EAR_TRAGION = 28;
+
+ // Left eye pupil.
+ LEFT_EYE_PUPIL = 29;
+
+ // Right eye pupil.
+ RIGHT_EYE_PUPIL = 30;
+
+ // Forehead glabella.
+ FOREHEAD_GLABELLA = 31;
+
+ // Chin gnathion.
+ CHIN_GNATHION = 32;
+
+ // Chin left gonion.
+ CHIN_LEFT_GONION = 33;
+
+ // Chin right gonion.
+ CHIN_RIGHT_GONION = 34;
+ }
+
+ // Face landmark type.
+ Type type = 3;
+
+ // Face landmark position.
+ Position position = 4;
+ }
+
+ // The bounding polygon around the face. The coordinates of the bounding box
+ // are in the original image's scale.
+ // The bounding box is computed to "frame" the face in accordance with human
+ // expectations. It is based on the landmarker results.
+ // Note that one or more x and/or y coordinates may not be generated in the
+ // `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ // appears in the image to be annotated.
+ BoundingPoly bounding_poly = 1;
+
+ // The `fd_bounding_poly` bounding polygon is tighter than the
+ // `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ // is used to eliminate the face from any image analysis that detects the
+ // "amount of skin" visible in an image. It is not based on the
+ // landmarker results, only on the initial face detection, hence
+ // the <code>fd</code> (face detection) prefix.
+ BoundingPoly fd_bounding_poly = 2;
+
+ // Detected face landmarks.
+ repeated Landmark landmarks = 3;
+
+ // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ // of the face relative to the image vertical about the axis perpendicular to
+ // the face. Range [-180,180].
+ float roll_angle = 4;
+
+ // Yaw angle, which indicates the leftward/rightward angle that the face is
+ // pointing relative to the vertical plane perpendicular to the image. Range
+ // [-180,180].
+ float pan_angle = 5;
+
+ // Pitch angle, which indicates the upwards/downwards angle that the face is
+ // pointing relative to the image's horizontal plane. Range [-180,180].
+ float tilt_angle = 6;
+
+ // Detection confidence. Range [0, 1].
+ float detection_confidence = 7;
+
+ // Face landmarking confidence. Range [0, 1].
+ float landmarking_confidence = 8;
+
+ // Joy likelihood.
+ Likelihood joy_likelihood = 9;
+
+ // Sorrow likelihood.
+ Likelihood sorrow_likelihood = 10;
+
+ // Anger likelihood.
+ Likelihood anger_likelihood = 11;
+
+ // Surprise likelihood.
+ Likelihood surprise_likelihood = 12;
+
+ // Under-exposed likelihood.
+ Likelihood under_exposed_likelihood = 13;
+
+ // Blurred likelihood.
+ Likelihood blurred_likelihood = 14;
+
+ // Headwear likelihood.
+ Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+ // lat/long location coordinates.
+ google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+ // Name of the property.
+ string name = 1;
+
+ // Value of the property.
+ string value = 2;
+
+ // Value of numeric properties.
+ uint64 uint64_value = 3;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ string mid = 1;
+
+ // The language code for the locale in which the entity textual
+ // `description` is expressed.
+ string locale = 2;
+
+ // Entity textual description, expressed in its `locale` language.
+ string description = 3;
+
+ // Overall score of the result. Range [0, 1].
+ float score = 4;
+
+ // **Deprecated. Use `score` instead.**
+ // The accuracy of the entity detection in an image.
+ // For example, for an image in which the "Eiffel Tower" entity is detected,
+ // this field represents the confidence that there is a tower in the query
+ // image. Range [0, 1].
+ float confidence = 5 [deprecated = true];
+
+ // The relevancy of the ICA (Image Content Annotation) label to the
+ // image. For example, the relevancy of "tower" is likely higher to an image
+ // containing the detected "Eiffel Tower" than to an image containing a
+ // detected distant towering building, even though the confidence that
+ // there is a tower in each image may be the same. Range [0, 1].
+ float topicality = 6;
+
+ // Image region to which this entity belongs. Not produced
+ // for `LABEL_DETECTION` features.
+ BoundingPoly bounding_poly = 7;
+
+ // The location information for the detected entity. Multiple
+ // `LocationInfo` elements can be present because one location may
+ // indicate the location of the scene in the image, and another location
+ // may indicate the location of the place where the image was taken.
+ // Location information is usually present for landmarks.
+ repeated LocationInfo locations = 8;
+
+ // Some entities may have optional user-supplied `Property` (name/value)
+  // fields, such as a score or string that qualifies the entity.
+ repeated Property properties = 9;
+}
+
+// Set of detected objects with bounding boxes.
+message LocalizedObjectAnnotation {
+ // Object ID that should align with EntityAnnotation mid.
+ string mid = 1;
+
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+
+ // Object name, expressed in its `language_code` language.
+ string name = 3;
+
+ // Score of the result. Range [0, 1].
+ float score = 4;
+
+ // Image region to which this object belongs. This must be populated.
+ BoundingPoly bounding_poly = 5;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+ // Represents the adult content likelihood for the image. Adult content may
+ // contain elements such as nudity, pornographic images or cartoons, or
+ // sexual activities.
+ Likelihood adult = 1;
+
+  // Spoof likelihood. The likelihood that a modification
+ // was made to the image's canonical version to make it appear
+ // funny or offensive.
+ Likelihood spoof = 2;
+
+ // Likelihood that this is a medical image.
+ Likelihood medical = 3;
+
+ // Likelihood that this image contains violent content.
+ Likelihood violence = 4;
+
+ // Likelihood that the request image contains racy content. Racy content may
+ // include (but is not limited to) skimpy or sheer clothing, strategically
+ // covered nudity, lewd or provocative poses, or close-ups of sensitive
+ // body areas.
+ Likelihood racy = 9;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+ // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1;
+
+ // Max lat/long pair.
+ google.type.LatLng max_lat_lng = 2;
+}
+
+// Color information consists of RGB channels, score, and the fraction of
+// the image that the color occupies in the image.
+message ColorInfo {
+ // RGB components of the color.
+ google.type.Color color = 1;
+
+ // Image-specific score for this color. Value in range [0, 1].
+ float score = 2;
+
+ // The fraction of pixels the color occupies in the image.
+ // Value in range [0, 1].
+ float pixel_fraction = 3;
+}
+
+// Set of dominant colors and their corresponding scores.
+message DominantColorsAnnotation {
+ // RGB color values with their score and pixel fraction.
+ repeated ColorInfo colors = 1;
+}
+
+// Stores image properties, such as dominant colors.
+message ImageProperties {
+ // If present, dominant colors completed successfully.
+ DominantColorsAnnotation dominant_colors = 1;
+}
+
+// Single crop hint that is used to generate a new crop when serving an image.
+message CropHint {
+ // The bounding polygon for the crop region. The coordinates of the bounding
+ // box are in the original image's scale.
+ BoundingPoly bounding_poly = 1;
+
+ // Confidence of this being a salient region. Range [0, 1].
+ float confidence = 2;
+
+ // Fraction of importance of this salient region with respect to the original
+ // image.
+ float importance_fraction = 3;
+}
+
+// Set of crop hints that are used to generate new crops when serving images.
+message CropHintsAnnotation {
+ // Crop hint results.
+ repeated CropHint crop_hints = 1;
+}
+
+// Parameters for crop hints annotation request.
+message CropHintsParams {
+ // Aspect ratios in floats, representing the ratio of the width to the height
+ // of the image. For example, if the desired aspect ratio is 4/3, the
+ // corresponding float value should be 1.33333. If not specified, the
+ // best possible crop is returned. The number of provided aspect ratios is
+ // limited to a maximum of 16; any aspect ratios provided after the 16th are
+ // ignored.
+ repeated float aspect_ratios = 1;
+}
+
+// Parameters for web detection request.
+message WebDetectionParams {
+ // Whether to include results derived from the geo information in the image.
+ bool include_geo_results = 2;
+}
+
+// Image context and/or feature-specific parameters.
+message ImageContext {
+ // Not used.
+ LatLongRect lat_long_rect = 1;
+
+ // List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ // yields the best results since it enables automatic language detection. For
+ // languages based on the Latin alphabet, setting `language_hints` is not
+ // needed. In rare cases, when the language of the text in the image is known,
+ // setting a hint will help get better results (although it will be a
+ // significant hindrance if the hint is wrong). Text detection returns an
+ // error if one or more of the specified languages is not one of the
+ // [supported languages](/vision/docs/languages).
+ repeated string language_hints = 2;
+
+ // Parameters for crop hints annotation request.
+ CropHintsParams crop_hints_params = 4;
+
+ // Parameters for product search.
+ ProductSearchParams product_search_params = 5;
+
+ // Parameters for web detection.
+ WebDetectionParams web_detection_params = 6;
+}
+
+// Request for performing Google Cloud Vision API tasks over a user-provided
+// image, with user-requested features, and with context information.
+message AnnotateImageRequest {
+ // The image to be processed.
+ Image image = 1;
+
+ // Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image.
+ ImageContext image_context = 3;
+}
+
+// If an image was produced from a file (e.g. a PDF), this message gives
+// information about the source of that image.
+message ImageAnnotationContext {
+ // The URI of the file used to produce the image.
+ string uri = 1;
+
+ // If the file was a PDF or TIFF, this field gives the page number within
+ // the file used to produce the image.
+ int32 page_number = 2;
+}
+
+// Response to an image annotation request.
+message AnnotateImageResponse {
+ // If present, face detection has completed successfully.
+ repeated FaceAnnotation face_annotations = 1;
+
+ // If present, landmark detection has completed successfully.
+ repeated EntityAnnotation landmark_annotations = 2;
+
+ // If present, logo detection has completed successfully.
+ repeated EntityAnnotation logo_annotations = 3;
+
+ // If present, label detection has completed successfully.
+ repeated EntityAnnotation label_annotations = 4;
+
+ // If present, localized object detection has completed successfully.
+ // This will be sorted descending by confidence score.
+ repeated LocalizedObjectAnnotation localized_object_annotations = 22;
+
+ // If present, text (OCR) detection has completed successfully.
+ repeated EntityAnnotation text_annotations = 5;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ // This annotation provides the structural hierarchy for the OCR detected
+ // text.
+ TextAnnotation full_text_annotation = 12;
+
+ // If present, safe-search annotation has completed successfully.
+ SafeSearchAnnotation safe_search_annotation = 6;
+
+ // If present, image properties were extracted successfully.
+ ImageProperties image_properties_annotation = 8;
+
+ // If present, crop hints have completed successfully.
+ CropHintsAnnotation crop_hints_annotation = 11;
+
+ // If present, web detection has completed successfully.
+ WebDetection web_detection = 13;
+
+ // If present, product search has completed successfully.
+ ProductSearchResults product_search_results = 14;
+
+ // If set, represents the error message for the operation.
+ // Note that filled-in image annotations are guaranteed to be
+ // correct, even when `error` is set.
+ google.rpc.Status error = 9;
+
+ // If present, contextual information is needed to understand where this image
+ // comes from.
+ ImageAnnotationContext context = 21;
+}
+
+// Response to a single file annotation request. A file may contain one or more
+// images, which individually have their own responses.
+message AnnotateFileResponse {
+ // Information about the file for which this response is generated.
+ InputConfig input_config = 1;
+
+ // Individual responses to images found within the file.
+ repeated AnnotateImageResponse responses = 2;
+
+ // This field gives the total number of pages in the file.
+ int32 total_pages = 3;
+}
+
+// Multiple image annotation requests are batched into a single service call.
+message BatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+}
+
+// Response to a batch image annotation request.
+message BatchAnnotateImagesResponse {
+ // Individual responses to image annotation requests within the batch.
+ repeated AnnotateImageResponse responses = 1;
+}
+
+// A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
+message AnnotateFileRequest {
+ // Required. Information about the input file.
+ InputConfig input_config = 1;
+
+ // Required. Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image(s) in the file.
+ ImageContext image_context = 3;
+
+ // Pages of the file to perform image annotation.
+ //
+  // Pages start from 1; we assume the first page of the file is page 1.
+ // At most 5 pages are supported per request. Pages can be negative.
+ //
+ // Page 1 means the first page.
+ // Page 2 means the second page.
+ // Page -1 means the last page.
+ // Page -2 means the second to the last page.
+ //
+ // If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
+ //
+ // If this field is empty, by default the service performs image annotation
+ // for the first 5 pages of the file.
+ repeated int32 pages = 4;
+}
+
+// A list of requests to annotate files using the BatchAnnotateFiles API.
+message BatchAnnotateFilesRequest {
+ // The list of file annotation requests. Right now we support only one
+ // AnnotateFileRequest in BatchAnnotateFilesRequest.
+ repeated AnnotateFileRequest requests = 1;
+}
+
+// A list of file annotation responses.
+message BatchAnnotateFilesResponse {
+ // The list of file annotation responses, each response corresponding to each
+ // AnnotateFileRequest in BatchAnnotateFilesRequest.
+ repeated AnnotateFileResponse responses = 1;
+}
+
+// An offline file annotation request.
+message AsyncAnnotateFileRequest {
+ // Required. Information about the input file.
+ InputConfig input_config = 1;
+
+ // Required. Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image(s) in the file.
+ ImageContext image_context = 3;
+
+ // Required. The desired output location and metadata (e.g. format).
+ OutputConfig output_config = 4;
+}
+
+// The response for a single offline file annotation request.
+message AsyncAnnotateFileResponse {
+ // The output location and metadata from AsyncAnnotateFileRequest.
+ OutputConfig output_config = 1;
+}
+
+// Request for async image annotation for a list of images.
+message AsyncBatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+
+ // Required. The desired output location and metadata (e.g. format).
+ OutputConfig output_config = 2;
+}
+
+// Response to an async batch image annotation request.
+message AsyncBatchAnnotateImagesResponse {
+ // The output location and metadata from AsyncBatchAnnotateImagesRequest.
+ OutputConfig output_config = 1;
+}
+
+// Multiple async file annotation requests are batched into a single service
+// call.
+message AsyncBatchAnnotateFilesRequest {
+ // Individual async file annotation requests for this batch.
+ repeated AsyncAnnotateFileRequest requests = 1;
+}
+
+// Response to an async batch file annotation request.
+message AsyncBatchAnnotateFilesResponse {
+ // The list of file annotation responses, one for each request in
+ // AsyncBatchAnnotateFilesRequest.
+ repeated AsyncAnnotateFileResponse responses = 1;
+}
+
+// The desired input location and metadata.
+message InputConfig {
+ // The Google Cloud Storage location to read the input from.
+ GcsSource gcs_source = 1;
+
+ // File content, represented as a stream of bytes.
+ // Note: As with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ //
+ // Currently, this field only works for BatchAnnotateFiles requests. It does
+ // not work for AsyncBatchAnnotateFiles requests.
+ bytes content = 3;
+
+ // The type of the file. Currently only "application/pdf" and "image/tiff"
+ // are supported. Wildcards are not supported.
+ string mime_type = 2;
+}
+
+// The desired output location and metadata.
+message OutputConfig {
+ // The Google Cloud Storage location to write the output(s) to.
+ GcsDestination gcs_destination = 1;
+
+ // The max number of response protos to put into each output JSON file on
+ // Google Cloud Storage.
+ // The valid range is [1, 100]. If not specified, the default value is 20.
+ //
+ // For example, for one pdf file with 100 pages, 100 response protos will
+ // be generated. If `batch_size` = 20, then 5 json files each
+ // containing 20 response protos will be written under the prefix
+ // `gcs_destination`.`uri`.
+ //
+ // Currently, batch_size only applies to GcsDestination, with potential future
+ // support for other output configurations.
+ int32 batch_size = 2;
+}
+
+// The Google Cloud Storage location where the input will be read from.
+message GcsSource {
+ // Google Cloud Storage URI for the input file. This must only be a
+ // Google Cloud Storage object. Wildcards are not currently supported.
+ string uri = 1;
+}
+
+// The Google Cloud Storage location where the output will be written to.
+message GcsDestination {
+ // Google Cloud Storage URI where the results will be stored. Results will
+ // be in JSON format and preceded by its corresponding input URI. This field
+ // can either represent a single file, or a prefix for multiple outputs.
+ // Prefixes must end in a `/`.
+ //
+ // Examples:
+ //
+ // * File: gs://bucket-name/filename.json
+ // * Prefix: gs://bucket-name/prefix/here/
+ // * File: gs://bucket-name/prefix/here
+ //
+ // If multiple outputs, each response is still AnnotateFileResponse, each of
+ // which contains some subset of the full list of AnnotateImageResponse.
+ // Multiple outputs can happen if, for example, the output JSON is too large
+ // and overflows into multiple sharded files.
+ string uri = 1;
+}
+
+// Contains metadata for the BatchAnnotateImages operation.
+message OperationMetadata {
+ // Batch operation states.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is received.
+ CREATED = 1;
+
+ // Request is actively being processed.
+ RUNNING = 2;
+
+ // The batch processing is done.
+ DONE = 3;
+
+ // The batch processing was cancelled.
+ CANCELLED = 4;
+ }
+
+ // Current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was received.
+ google.protobuf.Timestamp create_time = 5;
+
+ // The time when the operation result was last updated.
+ google.protobuf.Timestamp update_time = 6;
+}
diff --git a/google/cloud/vision/v1p4beta1/product_search.proto b/google/cloud/vision/v1p4beta1/product_search.proto
new file mode 100644
index 000000000..007804f31
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/product_search.proto
@@ -0,0 +1,98 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+import "google/cloud/vision/v1p4beta1/product_search_service.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+// Parameters for a product search request.
+message ProductSearchParams {
+ // The bounding polygon around the area of interest in the image.
+ // Optional. If it is not specified, system discretion will be applied.
+ BoundingPoly bounding_poly = 9;
+
+ // The resource name of a
+ // [ProductSet][google.cloud.vision.v1p4beta1.ProductSet] to be searched for
+ // similar images.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ string product_set = 6;
+
+ // The list of product categories to search in. Currently, we only consider
+ // the first category, and either "homegoods", "apparel", or "toys" should be
+ // specified.
+ repeated string product_categories = 7;
+
+ // The filtering expression. This can be used to restrict search results based
+ // on Product labels. We currently support an AND of OR of key-value
+ // expressions, where each expression within an OR must have the same key.
+ //
+ // For example, "(color = red OR color = blue) AND brand = Google" is
+ // acceptable, but not "(color = red OR brand = Google)" or "color: red".
+ string filter = 8;
+}
+
+// Results for a product search request.
+message ProductSearchResults {
+ // Information about a product.
+ message Result {
+ // The Product.
+ Product product = 1;
+
+ // A confidence level on the match, ranging from 0 (no confidence) to
+ // 1 (full confidence).
+ float score = 2;
+
+ // The resource name of the image from the product that is the closest match
+ // to the query.
+ string image = 3;
+ }
+
+ // Information about the products similar to a single product in a query
+ // image.
+ message GroupedResult {
+ // The bounding polygon around the product detected in the query image.
+ BoundingPoly bounding_poly = 1;
+
+ // List of results, one for each product match.
+ repeated Result results = 2;
+ }
+
+ // Timestamp of the index which provided these results. Changes made after
+ // this time are not reflected in the current results.
+ google.protobuf.Timestamp index_time = 2;
+
+ // List of results, one for each product match.
+ repeated Result results = 5;
+
+ // List of results grouped by products detected in the query image. Each entry
+ // corresponds to one bounding polygon in the query image, and contains the
+ // matching products specific to that region. There may be duplicate product
+ // matches in the union of all the per-product results.
+ repeated GroupedResult product_grouped_results = 6;
+}
diff --git a/google/cloud/vision/v1p4beta1/product_search_service.proto b/google/cloud/vision/v1p4beta1/product_search_service.proto
new file mode 100644
index 000000000..e3b23c7b3
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/product_search_service.proto
@@ -0,0 +1,852 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchServiceProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+// Manages Products and ProductSets of reference images for use in product
+// search. It uses the following resource model:
+//
+// - The API has a collection of
+// [ProductSet][google.cloud.vision.v1p4beta1.ProductSet] resources, named
+// `projects/*/locations/*/productSets/*`, which acts as a way to put different
+// products into groups to limit identification.
+//
+// In parallel,
+//
+// - The API has a collection of
+// [Product][google.cloud.vision.v1p4beta1.Product] resources, named
+// `projects/*/locations/*/products/*`
+//
+// - Each [Product][google.cloud.vision.v1p4beta1.Product] has a collection of
+// [ReferenceImage][google.cloud.vision.v1p4beta1.ReferenceImage] resources,
+// named
+// `projects/*/locations/*/products/*/referenceImages/*`
+service ProductSearch {
+ // Creates and returns a new ProductSet resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing, or is longer than
+ // 4096 characters.
+ rpc CreateProductSet(CreateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*}/productSets"
+ body: "product_set"
+ };
+ }
+
+ // Lists ProductSets in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100, or less
+ // than 1.
+ rpc ListProductSets(ListProductSetsRequest)
+ returns (ListProductSetsResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{parent=projects/*/locations/*}/productSets"
+ };
+ }
+
+ // Gets information associated with a ProductSet.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc GetProductSet(GetProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Makes changes to a ProductSet resource.
+ // Only display_name can be updated currently.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but
+ // missing from the request or longer than 4096 characters.
+ rpc UpdateProductSet(UpdateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ patch: "/v1p4beta1/{product_set.name=projects/*/locations/*/productSets/*}"
+ body: "product_set"
+ };
+ }
+
+ // Permanently deletes a ProductSet. Products and ReferenceImages in the
+ // ProductSet are not deleted.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc DeleteProductSet(DeleteProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Creates and returns a new product resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if description is longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is missing or invalid.
+ rpc CreateProduct(CreateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*}/products"
+ body: "product"
+ };
+ }
+
+ // Lists products in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProducts(ListProductsRequest) returns (ListProductsResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{parent=projects/*/locations/*}/products"
+ };
+ }
+
+ // Gets information associated with a Product.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ rpc GetProduct(GetProductRequest) returns (Product) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Makes changes to a Product resource.
+ // Only the `display_name`, `description`, and `labels` fields can be updated
+ // right now.
+ //
+ // If labels are updated, the change will not be reflected in queries until
+ // the next index time.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but is
+ // missing from the request or longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if description is present in update_mask but is
+ // longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is present in update_mask.
+ rpc UpdateProduct(UpdateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ patch: "/v1p4beta1/{product.name=projects/*/locations/*/products/*}"
+ body: "product"
+ };
+ }
+
+ // Permanently deletes a product and its reference images.
+ //
+ // Metadata of the product and all its images will be deleted right away, but
+ // search queries against ProductSets containing the product may still work
+ // until all related caches are refreshed.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the product does not exist.
+ rpc DeleteProduct(DeleteProductRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p4beta1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Creates and returns a new ReferenceImage resource.
+ //
+ // The `bounding_poly` field is optional. If `bounding_poly` is not specified,
+ // the system will try to detect regions of interest in the image that are
+ // compatible with the product_category on the parent product. If it is
+ // specified, detection is ALWAYS skipped. The system converts polygons into
+ // non-rotated rectangles.
+ //
+ // Note that the pipeline will resize the image if the image resolution is too
+ // large to process (above 50MP).
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if the image_uri is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if the product does not exist.
+ // * Returns INVALID_ARGUMENT if bounding_poly is not provided, and nothing
+ // compatible with the parent product's product_category is detected.
+ // * Returns INVALID_ARGUMENT if bounding_poly contains more than 10 polygons.
+ rpc CreateReferenceImage(CreateReferenceImageRequest)
+ returns (ReferenceImage) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ body: "reference_image"
+ };
+ }
+
+ // Permanently deletes a reference image.
+ //
+ // The image metadata will be deleted right away, but search queries
+ // against ProductSets containing the image may still work until all related
+ // caches are refreshed.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the reference image does not exist.
+ rpc DeleteReferenceImage(DeleteReferenceImageRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p4beta1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Lists reference images.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the parent product does not exist.
+ // * Returns INVALID_ARGUMENT if the page_size is greater than 100, or less
+ // than 1.
+ rpc ListReferenceImages(ListReferenceImagesRequest)
+ returns (ListReferenceImagesResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ };
+ }
+
+ // Gets information associated with a ReferenceImage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the specified image does not exist.
+ rpc GetReferenceImage(GetReferenceImageRequest) returns (ReferenceImage) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Adds a Product to the specified ProductSet. If the Product is already
+ // present, no change is made.
+ //
+ // One Product can be added to at most 100 ProductSets.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product or the ProductSet doesn't exist.
+ rpc AddProductToProductSet(AddProductToProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}:addProduct"
+ body: "*"
+ };
+ }
+
+ // Removes a Product from the specified ProductSet.
+ //
+ // Possible errors:
+ //
+  // * Returns NOT_FOUND if the Product is not found under the ProductSet.
+ rpc RemoveProductFromProductSet(RemoveProductFromProductSetRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}:removeProduct"
+ body: "*"
+ };
+ }
+
+ // Lists the Products in a ProductSet, in an unspecified order. If the
+ // ProductSet does not exist, the products field of the response will be
+ // empty.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProductsInProductSet(ListProductsInProductSetRequest)
+ returns (ListProductsInProductSetResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}/products"
+ };
+ }
+
+ // Asynchronous API that imports a list of reference images to specified
+ // product sets based on a list of image information.
+ //
+ // The [google.longrunning.Operation][google.longrunning.Operation] API can be
+ // used to keep track of the progress and results of the request.
+ // `Operation.metadata` contains `BatchOperationMetadata`. (progress)
+ // `Operation.response` contains `ImportProductSetsResponse`. (results)
+ //
+ // The input source of this method is a csv file on Google Cloud Storage.
+ // For the format of the csv file please see
+ // [ImportProductSetsGcsSource.csv_file_uri][google.cloud.vision.v1p4beta1.ImportProductSetsGcsSource.csv_file_uri].
+ rpc ImportProductSets(ImportProductSetsRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*}/productSets:import"
+ body: "*"
+ };
+ }
+}
+
+// A Product contains ReferenceImages.
+message Product {
+ // A product label represented as a key-value pair.
+ message KeyValue {
+ // The key of the label attached to the product. Cannot be empty and cannot
+ // exceed 128 bytes.
+ string key = 1;
+
+ // The value of the label attached to the product. Cannot be empty and
+ // cannot exceed 128 bytes.
+ string value = 2;
+ }
+
+ // The resource name of the product.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ //
+ // This field is ignored when creating a product.
+ string name = 1;
+
+ // The user-provided name for this Product. Must not be empty. Must be at most
+ // 4096 characters long.
+ string display_name = 2;
+
+ // User-provided metadata to be stored with this product. Must be at most 4096
+ // characters long.
+ string description = 3;
+
+ // The category for the product identified by the reference image. This should
+ // be either "homegoods", "apparel", or "toys".
+ //
+ // This field is immutable.
+ string product_category = 4;
+
+ // Key-value pairs that can be attached to a product. At query time,
+ // constraints can be specified based on the product_labels.
+ //
+ // Note that integer values can be provided as strings, e.g. "1199". Only
+ // strings with integer values can match a range-based restriction which is
+ // to be supported soon.
+ //
+ // Multiple values can be assigned to the same key. One product may have up to
+ // 100 product_labels.
+ repeated KeyValue product_labels = 5;
+}
+
+// A ProductSet contains Products. A ProductSet can contain a maximum of 1
+// million reference images. If the limit is exceeded, periodic indexing will
+// fail.
+message ProductSet {
+ // The resource name of the ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ //
+ // This field is ignored when creating a ProductSet.
+ string name = 1;
+
+ // The user-provided name for this ProductSet. Must not be empty. Must be at
+ // most 4096 characters long.
+ string display_name = 2;
+
+ // Output only. The time at which this ProductSet was last indexed. Query
+ // results will reflect all updates before this time. If this ProductSet has
+ // never been indexed, this timestamp is the default value
+ // "1970-01-01T00:00:00Z".
+ //
+ // This field is ignored when creating a ProductSet.
+ google.protobuf.Timestamp index_time = 3;
+
+ // Output only. If there was an error with indexing the product set, the field
+ // is populated.
+ //
+ // This field is ignored when creating a ProductSet.
+ google.rpc.Status index_error = 4;
+}
+
+// A `ReferenceImage` represents a product image and its associated metadata,
+// such as bounding boxes.
+message ReferenceImage {
+ // The resource name of the reference image.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ //
+ // This field is ignored when creating a reference image.
+ string name = 1;
+
+ // The Google Cloud Storage URI of the reference image.
+ //
+ // The URI must start with `gs://`.
+ //
+ // Required.
+ string uri = 2;
+
+ // Bounding polygons around the areas of interest in the reference image.
+ // Optional. If this field is empty, the system will try to detect regions of
+ // interest. At most 10 bounding polygons will be used.
+ //
+ // The provided shape is converted into a non-rotated rectangle. Once
+ // converted, the small edge of the rectangle must be greater than or equal
+ // to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; 1:5
+ // is not).
+ repeated BoundingPoly bounding_polys = 3;
+}
+
+// Request message for the `CreateProduct` method.
+message CreateProductRequest {
+ // The project in which the Product should be created.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The product to create.
+ Product product = 2;
+
+ // A user-supplied resource id for this Product. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_id = 3;
+}
+
+// Request message for the `ListProducts` method.
+message ListProductsRequest {
+ // The project OR ProductSet from which Products should be listed.
+ //
+ // Format:
+ // `projects/PROJECT_ID/locations/LOC_ID`
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProducts` method.
+message ListProductsResponse {
+ // List of products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProduct` method.
+message GetProductRequest {
+ // Resource name of the Product to get.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProduct` method.
+message UpdateProductRequest {
+ // The Product resource which replaces the one on the server.
+ // product.name is immutable.
+ Product product = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields
+ // to update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask paths include `product_labels`, `display_name`, and
+ // `description`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProduct` method.
+message DeleteProductRequest {
+ // Resource name of product to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateProductSet` method.
+message CreateProductSetRequest {
+ // The project in which the ProductSet should be created.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The ProductSet to create.
+ ProductSet product_set = 2;
+
+ // A user-supplied resource id for this ProductSet. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_set_id = 3;
+}
+
+// Request message for the `ListProductSets` method.
+message ListProductSetsRequest {
+ // The project from which ProductSets should be listed.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductSets` method.
+message ListProductSetsResponse {
+ // List of ProductSets.
+ repeated ProductSet product_sets = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProductSet` method.
+message GetProductSetRequest {
+ // Resource name of the ProductSet to get.
+ //
+ // Format is:
+  // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProductSet` method.
+message UpdateProductSetRequest {
+ // The ProductSet resource which replaces the one on the server.
+ ProductSet product_set = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
+ // update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask path is `display_name`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProductSet` method.
+message DeleteProductSetRequest {
+ // Resource name of the ProductSet to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateReferenceImage` method.
+message CreateReferenceImageRequest {
+ // Resource name of the product in which to create the reference image.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The reference image to create.
+ // If an image ID is specified, it is ignored.
+ ReferenceImage reference_image = 2;
+
+ // A user-supplied resource id for the ReferenceImage to be added. If set,
+ // the server will attempt to use this value as the resource id. If it is
+ // already in use, an error is returned with code ALREADY_EXISTS. Must be at
+ // most 128 characters long. It cannot contain the character `/`.
+ string reference_image_id = 3;
+}
+
+// Request message for the `ListReferenceImages` method.
+message ListReferenceImagesRequest {
+ // Resource name of the product containing the reference images.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // A token identifying a page of results to be returned. This is the value
+ // of `nextPageToken` returned in a previous reference image list request.
+ //
+ // Defaults to the first page if not specified.
+ string page_token = 3;
+}
+
+// Response message for the `ListReferenceImages` method.
+message ListReferenceImagesResponse {
+ // The list of reference images.
+ repeated ReferenceImage reference_images = 1;
+
+  // The maximum number of items that can appear in this page of results.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string next_page_token = 3;
+}
+
+// Request message for the `GetReferenceImage` method.
+message GetReferenceImageRequest {
+ // The resource name of the ReferenceImage to get.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ string name = 1;
+}
+
+// Request message for the `DeleteReferenceImage` method.
+message DeleteReferenceImageRequest {
+ // The resource name of the reference image to delete.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`
+ string name = 1;
+}
+
+// Request message for the `AddProductToProductSet` method.
+message AddProductToProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be added to this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `RemoveProductFromProductSet` method.
+message RemoveProductFromProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be removed from this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetRequest {
+ // The ProductSet resource for which to retrieve Products.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetResponse {
+ // The list of Products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// The Google Cloud Storage location for a csv file which preserves a list of
+// ImportProductSetRequests in each line.
+message ImportProductSetsGcsSource {
+ // The Google Cloud Storage URI of the input csv file.
+ //
+ // The URI must start with `gs://`.
+ //
+ // The format of the input csv file should be one image per line.
+ // In each line, there are 8 columns.
+ //
+ // 1. image-uri
+ // 2. image-id
+ // 3. product-set-id
+ // 4. product-id
+ // 5. product-category
+ // 6. product-display-name
+ // 7. labels
+ // 8. bounding-poly
+ //
+ // The `image-uri`, `product-set-id`, `product-id`, and `product-category`
+ // columns are required. All other columns are optional.
+ //
+ // If the `ProductSet` or `Product` specified by the `product-set-id` and
+ // `product-id` values does not exist, then the system will create a new
+ // `ProductSet` or `Product` for the image. In this case, the
+ // `product-display-name` column refers to
+ // [display_name][google.cloud.vision.v1p4beta1.Product.display_name], the
+ // `product-category` column refers to
+ // [product_category][google.cloud.vision.v1p4beta1.Product.product_category],
+ // and the `labels` column refers to
+ // [product_labels][google.cloud.vision.v1p4beta1.Product.product_labels].
+ //
+ // The `image-id` column is optional but must be unique if provided. If it is
+ // empty, the system will automatically assign a unique id to the image.
+ //
+ // The `product-display-name` column is optional. If it is empty, the system
+ // sets the [display_name][google.cloud.vision.v1p4beta1.Product.display_name]
+ // field for the product to a space (" "). You can update the `display_name`
+ // later by using the API.
+ //
+ // If a `Product` with the specified `product-id` already exists, then the
+ // system ignores the `product-display-name`, `product-category`, and `labels`
+ // columns.
+ //
+ // The `labels` column (optional) is a line containing a list of
+ // comma-separated key-value pairs, in the following format:
+ //
+ // "key_1=value_1,key_2=value_2,...,key_n=value_n"
+ //
+ // The `bounding-poly` column (optional) identifies one region of
+ // interest from the image in the same manner as `CreateReferenceImage`. If
+ // you do not specify the `bounding-poly` column, then the system will try to
+ // detect regions of interest automatically.
+ //
+ // At most one `bounding-poly` column is allowed per line. If the image
+ // contains multiple regions of interest, add a line to the CSV file that
+ // includes the same product information, and the `bounding-poly` values for
+ // each region of interest.
+ //
+ // The `bounding-poly` column must contain an even number of comma-separated
+ // numbers, in the format "p1_x,p1_y,p2_x,p2_y,...,pn_x,pn_y". Use
+ // non-negative integers for absolute bounding polygons, and float values
+ // in [0, 1] for normalized bounding polygons.
+ //
+ // The system will resize the image if the image resolution is too
+ // large to process (larger than 20MP).
+ string csv_file_uri = 1;
+}
+
+// The input content for the `ImportProductSets` method.
+message ImportProductSetsInputConfig {
+ // The source of the input.
+ oneof source {
+ // The Google Cloud Storage location for a csv file which preserves a list
+ // of ImportProductSetRequests in each line.
+ ImportProductSetsGcsSource gcs_source = 1;
+ }
+}
+
+// Request message for the `ImportProductSets` method.
+message ImportProductSetsRequest {
+ // The project in which the ProductSets should be imported.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The input content for the list of requests.
+ ImportProductSetsInputConfig input_config = 2;
+}
+
+// Response message for the `ImportProductSets` method.
+//
+// This message is returned by the
+// [google.longrunning.Operations.GetOperation][google.longrunning.Operations.GetOperation]
+// method in the returned
+// [google.longrunning.Operation.response][google.longrunning.Operation.response]
+// field.
+message ImportProductSetsResponse {
+ // The list of reference_images that are imported successfully.
+ repeated ReferenceImage reference_images = 1;
+
+ // The rpc status for each ImportProductSet request, including both successes
+ // and errors.
+ //
+ // The number of statuses here matches the number of lines in the csv file,
+ // and statuses[i] stores the success or failure status of processing the i-th
+ // line of the csv, starting from line 0.
+ repeated google.rpc.Status statuses = 2;
+}
+
+// Metadata for the batch operations such as the current state.
+//
+// This is included in the `metadata` field of the `Operation` returned by the
+// `GetOperation` call of the `google::longrunning::Operations` service.
+message BatchOperationMetadata {
+ // Enumerates the possible states that the batch request can be in.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is actively being processed.
+ PROCESSING = 1;
+
+ // The request is done and at least one item has been successfully
+ // processed.
+ SUCCESSFUL = 2;
+
+ // The request is done and no item has been successfully processed.
+ FAILED = 3;
+
+ // The request is done after the longrunning.Operations.CancelOperation has
+ // been called by the user. Any records that were processed before the
+ // cancel command are output as specified in the request.
+ CANCELLED = 4;
+ }
+
+ // The current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was submitted to the server.
+ google.protobuf.Timestamp submit_time = 2;
+
+ // The time when the batch request is finished and
+ // [google.longrunning.Operation.done][google.longrunning.Operation.done] is
+ // set to true.
+ google.protobuf.Timestamp end_time = 3;
+}
diff --git a/google/cloud/vision/v1p4beta1/text_annotation.proto b/google/cloud/vision/v1p4beta1/text_annotation.proto
new file mode 100644
index 000000000..542677f27
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/text_annotation.proto
@@ -0,0 +1,261 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have their own
+// properties. Properties describe detected languages, breaks, etc. Please
+// to the
+// [TextAnnotation.TextProperty][google.cloud.vision.v1p4beta1.TextAnnotation.TextProperty]
+// message definition below for more detail.
+message TextAnnotation {
+ // Detected language for a structural component.
+ message DetectedLanguage {
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 1;
+
+ // Confidence of detected language. Range [0, 1].
+ float confidence = 2;
+ }
+
+ // Detected start or end of a structural component.
+ message DetectedBreak {
+ // Enum to denote the type of break found. New line, space etc.
+ enum BreakType {
+ // Unknown break label type.
+ UNKNOWN = 0;
+
+ // Regular space.
+ SPACE = 1;
+
+ // Sure space (very wide).
+ SURE_SPACE = 2;
+
+ // Line-wrapping break.
+ EOL_SURE_SPACE = 3;
+
+ // End-line hyphen that is not present in text; does not co-occur with
+ // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ HYPHEN = 4;
+
+ // Line break that ends a paragraph.
+ LINE_BREAK = 5;
+ }
+
+ // Detected break type.
+ BreakType type = 1;
+
+ // True if break prepends the element.
+ bool is_prefix = 2;
+ }
+
+ // Additional information detected on the structural component.
+ message TextProperty {
+ // A list of detected languages together with confidence.
+ repeated DetectedLanguage detected_languages = 1;
+
+ // Detected start or end of a text segment.
+ DetectedBreak detected_break = 2;
+ }
+
+ // List of pages detected by OCR.
+ repeated Page pages = 1;
+
+ // UTF-8 text detected on the pages.
+ string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+ // Additional information detected on the page.
+ TextAnnotation.TextProperty property = 1;
+
+ // Page width. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 width = 2;
+
+ // Page height. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 height = 3;
+
+ // List of blocks of text, images etc on this page.
+ repeated Block blocks = 4;
+
+ // Confidence of the OCR results on the page. Range [0, 1].
+ float confidence = 5;
+}
+
+// Logical element on the page.
+message Block {
+ // Type of a block (text, image etc) as identified by OCR.
+ enum BlockType {
+ // Unknown block type.
+ UNKNOWN = 0;
+
+ // Regular text block.
+ TEXT = 1;
+
+ // Table block.
+ TABLE = 2;
+
+ // Image block.
+ PICTURE = 3;
+
+ // Horizontal/vertical line box.
+ RULER = 4;
+
+ // Barcode block.
+ BARCODE = 5;
+ }
+
+ // Additional information detected for the block.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the block.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ //
+ // * when the text is horizontal it might look like:
+ //
+ // 0----1
+ // | |
+ // 3----2
+ //
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ //
+ // 2----3
+ // | |
+ // 1----0
+ //
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of paragraphs in this block (if this blocks is of type text).
+ repeated Paragraph paragraphs = 3;
+
+ // Detected block type (text, image etc) for this block.
+ BlockType block_type = 4;
+
+ // Confidence of the OCR results on the block. Range [0, 1].
+ float confidence = 5;
+}
+
+// Structural unit of text representing a number of words in certain order.
+message Paragraph {
+ // Additional information detected for the paragraph.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the paragraph.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of words in this paragraph.
+ repeated Word words = 3;
+
+ // Confidence of the OCR results for the paragraph. Range [0, 1].
+ float confidence = 4;
+}
+
+// A word representation.
+message Word {
+ // Additional information detected for the word.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the word.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of symbols in the word.
+ // The order of the symbols follows the natural reading order.
+ repeated Symbol symbols = 3;
+
+ // Confidence of the OCR results for the word. Range [0, 1].
+ float confidence = 4;
+}
+
+// A single symbol representation.
+message Symbol {
+ // Additional information detected for the symbol.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the symbol.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+  // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // The actual UTF-8 representation of the symbol.
+ string text = 3;
+
+ // Confidence of the OCR results for the symbol. Range [0, 1].
+ float confidence = 4;
+}
diff --git a/google/cloud/vision/v1p4beta1/vision_gapic.yaml b/google/cloud/vision/v1p4beta1/vision_gapic.yaml
new file mode 100644
index 000000000..90d6f5000
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/vision_gapic.yaml
@@ -0,0 +1,603 @@
+type: com.google.api.codegen.ConfigProto
+config_schema_version: 1.0.0
+# The settings of generated code in a specific language.
+language_settings:
+ java:
+ package_name: com.google.cloud.vision.v1p4beta1
+ python:
+ package_name: google.cloud.vision_v1p4beta1.gapic
+ go:
+ package_name: cloud.google.com/go/vision/apiv1p4beta1
+ csharp:
+ package_name: Google.Cloud.Vision.V1p4beta1
+ ruby:
+ package_name: Google::Cloud::Vision::V1p4beta1
+ php:
+ package_name: Google\Cloud\Vision\V1p4beta1
+ nodejs:
+ package_name: vision.v1p4beta1
+# A list of API interface configurations.
+interfaces:
+ # The fully qualified name of the API interface.
+- name: google.cloud.vision.v1p4beta1.ProductSearch
+ # A list of resource collection configurations.
+ # Consists of a name_pattern and an entity_name.
+ # The name_pattern is a pattern to describe the names of the resources of this
+ # collection, using the platform's conventions for URI patterns. A generator
+ # may use this to generate methods to compose and decompose such names. The
+ # pattern should use named placeholders as in `shelves/{shelf}/books/{book}`;
+ # those will be taken as hints for the parameter names of the generated
+ # methods. If empty, no name methods are generated.
+ # The entity_name is the name to be used as a basis for generated methods and
+ # classes.
+ collections:
+ - name_pattern: projects/{project}/locations/{location}
+ entity_name: location
+ - name_pattern: projects/{project}/locations/{location}/productSets/{product_set}
+ entity_name: product_set
+ - name_pattern: projects/{project}/locations/{location}/products/{product}
+ entity_name: product
+ - name_pattern: projects/{project}/locations/{location}/products/{product}/referenceImages/{reference_image}
+ entity_name: reference_image
+ # Definition for retryable codes.
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - DEADLINE_EXCEEDED
+ - UNAVAILABLE
+ - name: non_idempotent
+ retry_codes: []
+ # Definition for retry/backoff parameters.
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ # A list of method configurations.
+ # Common properties:
+ #
+ # name - The simple name of the method.
+ #
+ # flattening - Specifies the configuration for parameter flattening.
+ # Describes the parameter groups for which a generator should produce method
+ # overloads which allow a client to directly pass request message fields as
+ # method parameters. This information may or may not be used, depending on
+ # the target language.
+ # Consists of groups, which each represent a list of parameters to be
+ # flattened. Each parameter listed must be a field of the request message.
+ #
+ # required_fields - Fields that are always required for a request to be
+ # valid.
+ #
+ # resource_name_treatment - An enum that specifies how to treat the resource
+ # name formats defined in the field_name_patterns and
+ # response_field_name_patterns fields.
+ # UNSET: default value
+ # NONE: the collection configs will not be used by the generated code.
+ # VALIDATE: string fields will be validated by the client against the
+ # specified resource name formats.
+ # STATIC_TYPES: the client will use generated types for resource names.
+ #
+ # page_streaming - Specifies the configuration for paging.
+ # Describes information for generating a method which transforms a paging
+ # list RPC into a stream of resources.
+ # Consists of a request and a response.
+ # The request specifies request information of the list method. It defines
+ # which fields match the paging pattern in the request. The request consists
+ # of a page_size_field and a token_field. The page_size_field is the name of
+ # the optional field specifying the maximum number of elements to be
+ # returned in the response. The token_field is the name of the field in the
+ # request containing the page token.
+ # The response specifies response information of the list method. It defines
+ # which fields match the paging pattern in the response. The response
+ # consists of a token_field and a resources_field. The token_field is the
+ # name of the field in the response containing the next page token. The
+ # resources_field is the name of the field in the response containing the
+ # list of resources belonging to the page.
+ #
+ # retry_codes_name - Specifies the configuration for retryable codes. The
+ # name must be defined in interfaces.retry_codes_def.
+ #
+ # retry_params_name - Specifies the configuration for retry/backoff
+ # parameters. The name must be defined in interfaces.retry_params_def.
+ #
+ # field_name_patterns - Maps the field name of the request type to
+ # entity_name of interfaces.collections.
+ # Specifies the string pattern that the field must follow.
+ #
+ # timeout_millis - Specifies the default timeout for a non-retrying call. If
+ # the call is retrying, refer to retry_params_name instead.
+ methods:
+ - name: CreateProductSet
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - product_set
+ - product_set_id
+ required_fields:
+ - parent
+ - product_set
+ - product_set_id
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: ListProductSets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: product_sets
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: GetProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: UpdateProductSet
+ flattening:
+ groups:
+ - parameters:
+ - product_set
+ - update_mask
+ required_fields:
+ - product_set
+ - update_mask
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ product_set.name: product_set
+ timeout_millis: 60000
+ - name: DeleteProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: CreateProduct
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - product
+ - product_id
+ required_fields:
+ - parent
+ - product
+ - product_id
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: ListProducts
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: products
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ - name: GetProduct
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product
+ timeout_millis: 60000
+ - name: UpdateProduct
+    # Flattening configuration for the product and update_mask request params.
+ flattening:
+ groups:
+ - parameters:
+ - product
+ - update_mask
+ required_fields:
+ - product
+ - update_mask
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ product.name: product
+ timeout_millis: 60000
+ - name: DeleteProduct
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product
+ timeout_millis: 60000
+ - name: CreateReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - reference_image
+ - reference_image_id
+ required_fields:
+ - parent
+ - reference_image
+ - reference_image_id
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: product
+ timeout_millis: 60000
+ - name: DeleteReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: reference_image
+ timeout_millis: 60000
+ - name: ListReferenceImages
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: reference_images
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: product
+ timeout_millis: 60000
+ - name: GetReferenceImage
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: reference_image
+ timeout_millis: 60000
+ - name: AddProductToProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - product
+ required_fields:
+ - name
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: RemoveProductFromProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - product
+ required_fields:
+ - name
+ - product
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: ListProductsInProductSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: products
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: product_set
+ timeout_millis: 60000
+ - name: ImportProductSets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - input_config
+ required_fields:
+ - parent
+ - input_config
+ long_running:
+ return_type: google.cloud.vision.v1p4beta1.ImportProductSetsResponse
+ metadata_type: google.cloud.vision.v1p4beta1.BatchOperationMetadata
+ initial_poll_delay_millis: 20000
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 45000
+ total_poll_timeout_millis: 86400000
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: location
+ timeout_millis: 60000
+ # The fully qualified name of the API interface.
+- name: google.cloud.vision.v1p4beta1.ImageAnnotator
+ # A list of resource collection configurations.
+ # Consists of a name_pattern and an entity_name.
+ # The name_pattern is a pattern to describe the names of the resources of this
+ # collection, using the platform's conventions for URI patterns. A generator
+ # may use this to generate methods to compose and decompose such names. The
+ # pattern should use named placeholders as in `shelves/{shelf}/books/{book}`;
+ # those will be taken as hints for the parameter names of the generated
+ # methods. If empty, no name methods are generated.
+ # The entity_name is the name to be used as a basis for generated methods and
+ # classes.
+ collections: []
+ # Definition for retryable codes.
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - DEADLINE_EXCEEDED
+ - UNAVAILABLE
+ - name: non_idempotent
+ retry_codes: []
+ # Definition for retry/backoff parameters.
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ # A list of method configurations.
+ # Common properties:
+ #
+ # name - The simple name of the method.
+ #
+ # flattening - Specifies the configuration for parameter flattening.
+ # Describes the parameter groups for which a generator should produce method
+ # overloads which allow a client to directly pass request message fields as
+ # method parameters. This information may or may not be used, depending on
+ # the target language.
+ # Consists of groups, which each represent a list of parameters to be
+ # flattened. Each parameter listed must be a field of the request message.
+ #
+ # required_fields - Fields that are always required for a request to be
+ # valid.
+ #
+ # resource_name_treatment - An enum that specifies how to treat the resource
+ # name formats defined in the field_name_patterns and
+ # response_field_name_patterns fields.
+ # UNSET: default value
+ # NONE: the collection configs will not be used by the generated code.
+ # VALIDATE: string fields will be validated by the client against the
+ # specified resource name formats.
+ # STATIC_TYPES: the client will use generated types for resource names.
+ #
+ # page_streaming - Specifies the configuration for paging.
+ # Describes information for generating a method which transforms a paging
+ # list RPC into a stream of resources.
+ # Consists of a request and a response.
+ # The request specifies request information of the list method. It defines
+ # which fields match the paging pattern in the request. The request consists
+ # of a page_size_field and a token_field. The page_size_field is the name of
+ # the optional field specifying the maximum number of elements to be
+ # returned in the response. The token_field is the name of the field in the
+ # request containing the page token.
+ # The response specifies response information of the list method. It defines
+ # which fields match the paging pattern in the response. The response
+ # consists of a token_field and a resources_field. The token_field is the
+ # name of the field in the response containing the next page token. The
+ # resources_field is the name of the field in the response containing the
+ # list of resources belonging to the page.
+ #
+ # retry_codes_name - Specifies the configuration for retryable codes. The
+ # name must be defined in interfaces.retry_codes_def.
+ #
+ # retry_params_name - Specifies the configuration for retry/backoff
+ # parameters. The name must be defined in interfaces.retry_params_def.
+ #
+ # field_name_patterns - Maps the field name of the request type to
+ # entity_name of interfaces.collections.
+ # Specifies the string pattern that the field must follow.
+ #
+ # timeout_millis - Specifies the default timeout for a non-retrying call. If
+ # the call is retrying, refer to retry_params_name instead.
+ methods:
+ - name: BatchAnnotateImages
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: BatchAnnotateFiles
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: AsyncBatchAnnotateImages
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ - output_config
+ required_fields:
+ - requests
+ - output_config
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ long_running:
+ return_type: google.cloud.vision.v1p4beta1.AsyncBatchAnnotateImagesResponse
+ metadata_type: google.cloud.vision.v1p4beta1.OperationMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 60000
+ - name: AsyncBatchAnnotateFiles
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ long_running:
+ return_type: google.cloud.vision.v1p4beta1.AsyncBatchAnnotateFilesResponse
+ metadata_type: google.cloud.vision.v1p4beta1.OperationMetadata
+ initial_poll_delay_millis: 20000
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 45000
+ total_poll_timeout_millis: 86400000
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+resource_name_generation:
+- message_name: CreateProductSetRequest
+ field_entity_map:
+ parent: location
+- message_name: ListProductSetsRequest
+ field_entity_map:
+ parent: location
+- message_name: GetProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: UpdateProductSetRequest
+ field_entity_map:
+ product_set.name: product_set
+- message_name: DeleteProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: CreateProductRequest
+ field_entity_map:
+ parent: location
+- message_name: ListProductsRequest
+ field_entity_map:
+ parent: location
+- message_name: GetProductRequest
+ field_entity_map:
+ name: product
+- message_name: UpdateProductRequest
+ field_entity_map:
+ product.name: product
+- message_name: DeleteProductRequest
+ field_entity_map:
+ name: product
+- message_name: CreateReferenceImageRequest
+ field_entity_map:
+ parent: product
+- message_name: DeleteReferenceImageRequest
+ field_entity_map:
+ name: reference_image
+- message_name: ListReferenceImagesRequest
+ field_entity_map:
+ parent: product
+- message_name: GetReferenceImageRequest
+ field_entity_map:
+ name: reference_image
+- message_name: AddProductToProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: RemoveProductFromProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: ListProductsInProductSetRequest
+ field_entity_map:
+ name: product_set
+- message_name: ImportProductSetsRequest
+ field_entity_map:
+ parent: location
diff --git a/google/cloud/vision/v1p4beta1/web_detection.proto b/google/cloud/vision/v1p4beta1/web_detection.proto
new file mode 100644
index 000000000..15822563b
--- /dev/null
+++ b/google/cloud/vision/v1p4beta1/web_detection.proto
@@ -0,0 +1,107 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+ // Entity deduced from similar images on the Internet.
+ message WebEntity {
+ // Opaque entity ID.
+ string entity_id = 1;
+
+ // Overall relevancy score for the entity.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+
+ // Canonical description of the entity, in English.
+ string description = 3;
+ }
+
+ // Metadata for online images.
+ message WebImage {
+ // The result image URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the image.
+ float score = 2;
+ }
+
+ // Label to provide extra metadata for the web detection.
+ message WebLabel {
+ // Label for extra metadata.
+ string label = 1;
+
+ // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ // For more information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+ }
+
+ // Metadata for web pages.
+ message WebPage {
+ // The result web page URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the web page.
+ float score = 2;
+
+ // Title for the web page, may contain HTML markups.
+ string page_title = 3;
+
+ // Fully matching images on the page.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 4;
+
+ // Partial matching images on the page.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its
+ // crops.
+ repeated WebImage partial_matching_images = 5;
+ }
+
+ // Deduced entities from similar images on the Internet.
+ repeated WebEntity web_entities = 1;
+
+ // Fully matching images from the Internet.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 2;
+
+ // Partial matching images from the Internet.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its crops.
+ repeated WebImage partial_matching_images = 3;
+
+ // Web pages containing the matching images from the Internet.
+ repeated WebPage pages_with_matching_images = 4;
+
+ // The visually similar image results.
+ repeated WebImage visually_similar_images = 6;
+
+ // The service's best guess as to the topic of the request image.
+ // Inferred from similar images on the open web.
+ repeated WebLabel best_guess_labels = 8;
+}
diff --git a/google/cloud/vision/vision_v1.yaml b/google/cloud/vision/vision_v1.yaml
new file mode 100644
index 000000000..8491f774c
--- /dev/null
+++ b/google/cloud/vision/vision_v1.yaml
@@ -0,0 +1,37 @@
+type: google.api.Service
+config_version: 3
+name: vision.googleapis.com
+title: Cloud Vision API
+
+apis:
+- name: google.cloud.vision.v1.ProductSearch
+- name: google.cloud.vision.v1.ImageAnnotator
+
+types:
+- name: google.cloud.vision.v1.AnnotateFileResponse
+- name: google.cloud.vision.v1.AsyncBatchAnnotateFilesResponse
+- name: google.cloud.vision.v1.BatchOperationMetadata
+- name: google.cloud.vision.v1.ImportProductSetsResponse
+- name: google.cloud.vision.v1.OperationMetadata
+
+documentation:
+ summary: |-
+ Integrates Google Vision features, including image labeling, face, logo, and
+ landmark detection, optical character recognition (OCR), and detection of
+ explicit content, into applications.
+
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v1/{name=operations/*}'
+ additional_bindings:
+ - get: '/v1/{name=locations/*/operations/*}'
+
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-vision
diff --git a/google/cloud/vision/vision_v1p1beta1.yaml b/google/cloud/vision/vision_v1p1beta1.yaml
new file mode 100644
index 000000000..81ff06d19
--- /dev/null
+++ b/google/cloud/vision/vision_v1p1beta1.yaml
@@ -0,0 +1,21 @@
+type: google.api.Service
+config_version: 3
+name: vision.googleapis.com
+title: Google Cloud Vision API
+
+apis:
+- name: google.cloud.vision.v1p1beta1.ImageAnnotator
+
+documentation:
+ summary: |-
+ Integrates Google Vision features, including image labeling, face, logo, and
+ landmark detection, optical character recognition (OCR), and detection of
+ explicit content, into applications.
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-vision
diff --git a/google/cloud/vision/vision_v1p2beta1.yaml b/google/cloud/vision/vision_v1p2beta1.yaml
new file mode 100644
index 000000000..0065f6302
--- /dev/null
+++ b/google/cloud/vision/vision_v1p2beta1.yaml
@@ -0,0 +1,26 @@
+type: google.api.Service
+config_version: 3
+name: vision.googleapis.com
+title: Google Cloud Vision API
+
+apis:
+- name: google.cloud.vision.v1p2beta1.ImageAnnotator
+
+types:
+- name: google.cloud.vision.v1p2beta1.AnnotateFileResponse
+- name: google.cloud.vision.v1p2beta1.AsyncBatchAnnotateFilesResponse
+- name: google.cloud.vision.v1p2beta1.OperationMetadata
+
+documentation:
+ summary: |-
+ Integrates Google Vision features, including image labeling, face, logo, and
+ landmark detection, optical character recognition (OCR), and detection of
+ explicit content, into applications.
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-vision
diff --git a/google/cloud/vision/vision_v1p3beta1.yaml b/google/cloud/vision/vision_v1p3beta1.yaml
new file mode 100644
index 000000000..8c384f050
--- /dev/null
+++ b/google/cloud/vision/vision_v1p3beta1.yaml
@@ -0,0 +1,37 @@
+type: google.api.Service
+config_version: 3
+name: vision.googleapis.com
+title: Cloud Vision API
+
+apis:
+- name: google.cloud.vision.v1p3beta1.ProductSearch
+- name: google.cloud.vision.v1p3beta1.ImageAnnotator
+
+types:
+- name: google.cloud.vision.v1p3beta1.AnnotateFileResponse
+- name: google.cloud.vision.v1p3beta1.AsyncBatchAnnotateFilesResponse
+- name: google.cloud.vision.v1p3beta1.BatchOperationMetadata
+- name: google.cloud.vision.v1p3beta1.ImportProductSetsResponse
+- name: google.cloud.vision.v1p3beta1.OperationMetadata
+
+documentation:
+ summary: |-
+ Integrates Google Vision features, including image labeling, face, logo, and
+ landmark detection, optical character recognition (OCR), and detection of
+ explicit content, into applications.
+
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v1/{name=operations/*}'
+ additional_bindings:
+ - get: '/v1/{name=locations/*/operations/*}'
+
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-vision
diff --git a/google/cloud/vision/vision_v1p4beta1.yaml b/google/cloud/vision/vision_v1p4beta1.yaml
new file mode 100644
index 000000000..f5bdd9b9b
--- /dev/null
+++ b/google/cloud/vision/vision_v1p4beta1.yaml
@@ -0,0 +1,39 @@
+type: google.api.Service
+config_version: 3
+name: vision.googleapis.com
+title: Cloud Vision API
+
+apis:
+- name: google.cloud.vision.v1p4beta1.ProductSearch
+- name: google.cloud.vision.v1p4beta1.ImageAnnotator
+
+types:
+- name: google.cloud.vision.v1p4beta1.BatchOperationMetadata
+- name: google.cloud.vision.v1p4beta1.ImportProductSetsResponse
+- name: google.cloud.vision.v1p4beta1.AnnotateFileResponse
+- name: google.cloud.vision.v1p4beta1.AsyncBatchAnnotateFilesResponse
+- name: google.cloud.vision.v1p4beta1.OperationMetadata
+- name: google.cloud.vision.v1p4beta1.AsyncBatchAnnotateImagesResponse
+- name: google.cloud.vision.v1p4beta1.BatchAnnotateFilesResponse
+
+documentation:
+ summary: |-
+ Integrates Google Vision features, including image labeling, face, logo, and
+ landmark detection, optical character recognition (OCR), and detection of
+ explicit content, into applications.
+
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v1/{name=operations/*}'
+ additional_bindings:
+ - get: '/v1/{name=locations/*/operations/*}'
+
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-vision