Diffstat (limited to 'google/cloud/datalabeling/v1beta1')
 google/cloud/datalabeling/v1beta1/annotation.proto              | 337
 google/cloud/datalabeling/v1beta1/annotation_spec_set.proto     |  56
 google/cloud/datalabeling/v1beta1/data_labeling_service.proto   | 728
 google/cloud/datalabeling/v1beta1/datalabeling_gapic.yaml       | 540
 google/cloud/datalabeling/v1beta1/dataset.proto                 | 306
 google/cloud/datalabeling/v1beta1/human_annotation_config.proto | 208
 google/cloud/datalabeling/v1beta1/instruction.proto             |  78
 google/cloud/datalabeling/v1beta1/operations.proto              | 206
 8 files changed, 2459 insertions(+), 0 deletions(-)
diff --git a/google/cloud/datalabeling/v1beta1/annotation.proto b/google/cloud/datalabeling/v1beta1/annotation.proto
new file mode 100644
index 000000000..204ad8e3d
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/annotation.proto
@@ -0,0 +1,337 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.datalabeling.v1beta1;
+
+import "google/cloud/datalabeling/v1beta1/annotation_spec_set.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1;datalabeling";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.datalabeling.v1beta1";
+
+
+// Specifies where the annotation comes from.
+enum AnnotationSource {
+ ANNOTATION_SOURCE_UNSPECIFIED = 0;
+
+ // Answer is provided by a human contributor.
+ OPERATOR = 3;
+}
+
+enum AnnotationSentiment {
+ ANNOTATION_SENTIMENT_UNSPECIFIED = 0;
+
+  // This annotation describes the data negatively.
+ NEGATIVE = 1;
+
+  // This label describes the data positively.
+ POSITIVE = 2;
+}
+
+enum AnnotationType {
+ ANNOTATION_TYPE_UNSPECIFIED = 0;
+
+ // Classification annotations in an image.
+ IMAGE_CLASSIFICATION_ANNOTATION = 1;
+
+ // Bounding box annotations in an image.
+ IMAGE_BOUNDING_BOX_ANNOTATION = 2;
+
+  // Oriented bounding box. The box does not have to be parallel to the
+  // horizontal line.
+ IMAGE_ORIENTED_BOUNDING_BOX_ANNOTATION = 13;
+
+ // Bounding poly annotations in an image.
+ IMAGE_BOUNDING_POLY_ANNOTATION = 10;
+
+ // Polyline annotations in an image.
+ IMAGE_POLYLINE_ANNOTATION = 11;
+
+ // Segmentation annotations in an image.
+ IMAGE_SEGMENTATION_ANNOTATION = 12;
+
+ // Classification annotations in video shots.
+ VIDEO_SHOTS_CLASSIFICATION_ANNOTATION = 3;
+
+ // Video object tracking annotation.
+ VIDEO_OBJECT_TRACKING_ANNOTATION = 4;
+
+ // Video object detection annotation.
+ VIDEO_OBJECT_DETECTION_ANNOTATION = 5;
+
+ // Video event annotation.
+ VIDEO_EVENT_ANNOTATION = 6;
+
+ // Speech to text annotation.
+ AUDIO_TRANSCRIPTION_ANNOTATION = 7;
+
+ // Classification for text.
+ TEXT_CLASSIFICATION_ANNOTATION = 8;
+
+ // Entity extraction for text.
+ TEXT_ENTITY_EXTRACTION_ANNOTATION = 9;
+}
+
+// Annotation for an Example. Each example may have one or more annotations.
+// For example, in an image classification problem, each image might have one
+// or more labels. We call the labels bound to an image its Annotations.
+message Annotation {
+ // Output only. Unique name of this annotation, format is:
+ //
+ // projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/{annotated_dataset}/examples/{example_id}/annotations/{annotation_id}
+ string name = 1;
+
+ // Output only. The source of the annotation.
+ AnnotationSource annotation_source = 2;
+
+  // Output only. The actual annotation value; for example, classification or
+  // bounding box values are stored here.
+ AnnotationValue annotation_value = 3;
+
+ // Output only. Annotation metadata, including information like votes
+ // for labels.
+ AnnotationMetadata annotation_metadata = 4;
+
+ // Output only. Sentiment for this annotation.
+ AnnotationSentiment annotation_sentiment = 6;
+}
+
+// Annotation value for an example.
+message AnnotationValue {
+ oneof value_type {
+ // Annotation value for image classification case.
+ ImageClassificationAnnotation image_classification_annotation = 1;
+
+ // Annotation value for image bounding box, oriented bounding box
+ // and polygon cases.
+ ImageBoundingPolyAnnotation image_bounding_poly_annotation = 2;
+
+    // Annotation value for image polyline cases.
+    // A polyline differs from a BoundingPoly: it is formed by connected line
+    // segments but is not a closed shape, and its line segments can cross
+    // each other.
+ ImagePolylineAnnotation image_polyline_annotation = 8;
+
+ // Annotation value for image segmentation.
+ ImageSegmentationAnnotation image_segmentation_annotation = 9;
+
+ // Annotation value for text classification case.
+ TextClassificationAnnotation text_classification_annotation = 3;
+
+ // Annotation value for video classification case.
+ VideoClassificationAnnotation video_classification_annotation = 4;
+
+ // Annotation value for video object detection and tracking case.
+ VideoObjectTrackingAnnotation video_object_tracking_annotation = 5;
+
+ // Annotation value for video event case.
+ VideoEventAnnotation video_event_annotation = 6;
+
+ // Annotation value for speech audio recognition case.
+ AudioRecognitionAnnotation audio_recognition_annotation = 7;
+ }
+}
+
+// Image classification annotation definition.
+message ImageClassificationAnnotation {
+ // Label of image.
+ AnnotationSpec annotation_spec = 1;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
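
As an aside, a NormalizedVertex is simply a Vertex rescaled by the image dimensions so that both coordinates fall in [0, 1]. A minimal, self-contained Python sketch of that relationship (the helper and its names are illustrative, not part of the API surface):

    # Convert pixel coordinates (Vertex) into [0, 1] coordinates (NormalizedVertex).
    def normalize_vertex(x, y, image_width, image_height):
        return (x / image_width, y / image_height)

    # A point at (320, 240) in a 640x480 image maps to (0.5, 0.5).
    print(normalize_vertex(320, 240, 640, 480))
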
+
+// A bounding polygon in the image.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+}
+
+// Normalized bounding polygon.
+message NormalizedBoundingPoly {
+ // The bounding polygon normalized vertices.
+ repeated NormalizedVertex normalized_vertices = 1;
+}
+
+// Image bounding poly annotation. It represents a polygon in the image,
+// including the special case of a bounding box.
+message ImageBoundingPolyAnnotation {
+ // The region of the polygon. If it is a bounding box, it is guaranteed to be
+ // four points.
+ oneof bounded_area {
+ BoundingPoly bounding_poly = 2;
+
+ NormalizedBoundingPoly normalized_bounding_poly = 3;
+ }
+
+ // Label of object in this bounding polygon.
+ AnnotationSpec annotation_spec = 1;
+}
+
+// A line with multiple line segments.
+message Polyline {
+ // The polyline vertices.
+ repeated Vertex vertices = 1;
+}
+
+// Normalized polyline.
+message NormalizedPolyline {
+ // The normalized polyline vertices.
+ repeated NormalizedVertex normalized_vertices = 1;
+}
+
+// A polyline for the image annotation.
+message ImagePolylineAnnotation {
+ oneof poly {
+ Polyline polyline = 2;
+
+ NormalizedPolyline normalized_polyline = 3;
+ }
+
+ // Label of this polyline.
+ AnnotationSpec annotation_spec = 1;
+}
+
+// Image segmentation annotation.
+message ImageSegmentationAnnotation {
+  // The mapping between RGB colors and annotation specs. The key is an RGB
+  // color represented in the format rgb(0, 0, 0). The value is the
+  // AnnotationSpec.
+ map<string, AnnotationSpec> annotation_colors = 1;
+
+ // Image format.
+ string mime_type = 2;
+
+ // A byte string of a full image's color map.
+ bytes image_bytes = 3;
+}
+
+// Text classification annotation.
+message TextClassificationAnnotation {
+ // Label of the text.
+ AnnotationSpec annotation_spec = 1;
+}
+
+// A time period inside of an example that has a time dimension (e.g. video).
+message TimeSegment {
+ // Start of the time segment (inclusive), represented as the duration since
+ // the example start.
+ google.protobuf.Duration start_time_offset = 1;
+
+ // End of the time segment (exclusive), represented as the duration since the
+ // example start.
+ google.protobuf.Duration end_time_offset = 2;
+}
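
Both offsets are protobuf Durations measured from the start of the example, with the start inclusive and the end exclusive. A small sketch of how such offsets could be built in Python, assuming only the standard protobuf runtime:

    from google.protobuf import duration_pb2

    # A segment covering seconds 5 (inclusive) to 10 (exclusive) of the example.
    start = duration_pb2.Duration(seconds=5)
    end = duration_pb2.Duration(seconds=10)
    # These values would populate TimeSegment.start_time_offset and
    # TimeSegment.end_time_offset respectively.
    print(start.seconds, end.seconds)
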
+
+// Video classification annotation.
+message VideoClassificationAnnotation {
+ // The time segment of the video to which the annotation applies.
+ TimeSegment time_segment = 1;
+
+ // Label of the segment specified by time_segment.
+ AnnotationSpec annotation_spec = 2;
+}
+
+// Video frame level annotation for object detection and tracking.
+message ObjectTrackingFrame {
+ // The bounding box location of this object track for the frame.
+ oneof bounded_area {
+ BoundingPoly bounding_poly = 1;
+
+ NormalizedBoundingPoly normalized_bounding_poly = 2;
+ }
+
+ // The time offset of this frame relative to the beginning of the video.
+ google.protobuf.Duration time_offset = 3;
+}
+
+// Video object tracking annotation.
+message VideoObjectTrackingAnnotation {
+ // Label of the object tracked in this annotation.
+ AnnotationSpec annotation_spec = 1;
+
+ // The time segment of the video to which object tracking applies.
+ TimeSegment time_segment = 2;
+
+ // The list of frames where this object track appears.
+ repeated ObjectTrackingFrame object_tracking_frames = 3;
+}
+
+// Video event annotation.
+message VideoEventAnnotation {
+ // Label of the event in this annotation.
+ AnnotationSpec annotation_spec = 1;
+
+ // The time segment of the video to which the annotation applies.
+ TimeSegment time_segment = 2;
+}
+
+// Speech audio recognition.
+message AudioRecognitionAnnotation {
+ // Transcript text representing the words spoken.
+ string transcript = 1;
+
+ // Start position in audio file that the transcription corresponds to.
+ google.protobuf.Duration start_offset = 2;
+
+ // End position in audio file that the transcription corresponds to.
+ google.protobuf.Duration end_offset = 3;
+}
+
+// Additional information associated with the annotation.
+message AnnotationMetadata {
+ // Metadata related to human labeling.
+ OperatorMetadata operator_metadata = 2;
+}
+
+// General information useful for labels coming from contributors.
+message OperatorMetadata {
+  // Confidence score corresponding to a label. For example, if 3 contributors
+ // have answered the question and 2 of them agree on the final label, the
+ // confidence score will be 0.67 (2/3).
+ float score = 1;
+
+  // The total number of contributors that answered this question.
+ int32 total_votes = 2;
+
+  // The total number of contributors that chose this label.
+ int32 label_votes = 3;
+
+ repeated string comments = 4;
+}
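
The score described above is simply label_votes divided by total_votes; a one-line Python sketch of that relationship (the helper name is illustrative only):

    # Confidence score for a label: agreeing contributors / total contributors.
    def operator_score(label_votes, total_votes):
        return label_votes / total_votes if total_votes else 0.0

    print(round(operator_score(2, 3), 2))  # 0.67, matching the example above
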
diff --git a/google/cloud/datalabeling/v1beta1/annotation_spec_set.proto b/google/cloud/datalabeling/v1beta1/annotation_spec_set.proto
new file mode 100644
index 000000000..20fdec73c
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/annotation_spec_set.proto
@@ -0,0 +1,56 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.datalabeling.v1beta1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1;datalabeling";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.datalabeling.v1beta1";
+
+
+// AnnotationSpecSet is a collection of label definitions. For example, in
+// image classification tasks we define a set of labels; this set is called an
+// AnnotationSpecSet. An AnnotationSpecSet is immutable upon creation.
+message AnnotationSpecSet {
+ // Output only.
+ // AnnotationSpecSet resource name, format:
+ // projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}
+ string name = 1;
+
+ // Required. The display name for AnnotationSpecSet defined by user.
+ // Maximum of 64 characters.
+ string display_name = 2;
+
+ // Optional. User-provided description of the annotation specification set.
+ // The description can be up to 10000 characters long.
+ string description = 3;
+
+ // Required. The actual spec set defined by the users.
+ repeated AnnotationSpec annotation_specs = 4;
+}
+
+// Container of information related to one annotation spec.
+message AnnotationSpec {
+ // Required. The display name of the AnnotationSpec. Maximum of 64 characters.
+ string display_name = 1;
+
+ // Optional. User-provided description of the annotation specification.
+ // The description can be up to 10000 characters long.
+ string description = 2;
+}
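
To make the shape of this resource concrete, here is an illustrative Python dict mirroring the fields above (the label names are placeholders; the length limits are the ones documented in the comments):

    # Shape of an AnnotationSpecSet as it would be sent to CreateAnnotationSpecSet.
    annotation_spec_set = {
        "display_name": "traffic-signs",          # required, at most 64 characters
        "description": "Labels for sign types",   # optional, up to 10000 characters
        "annotation_specs": [                     # required
            {"display_name": "stop_sign"},
            {"display_name": "yield_sign", "description": "Inverted triangle"},
        ],
    }
    # "name" is output only (projects/{project_id}/annotationSpecSets/{...}),
    # so the caller does not set it.
    print(len(annotation_spec_set["annotation_specs"]))
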
diff --git a/google/cloud/datalabeling/v1beta1/data_labeling_service.proto b/google/cloud/datalabeling/v1beta1/data_labeling_service.proto
new file mode 100644
index 000000000..22222ab71
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/data_labeling_service.proto
@@ -0,0 +1,728 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.datalabeling.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/datalabeling/v1beta1/annotation_spec_set.proto";
+import "google/cloud/datalabeling/v1beta1/dataset.proto";
+import "google/cloud/datalabeling/v1beta1/human_annotation_config.proto";
+import "google/cloud/datalabeling/v1beta1/instruction.proto";
+import "google/cloud/datalabeling/v1beta1/operations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1;datalabeling";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.datalabeling.v1beta1";
+
+
+service DataLabelingService {
+  // Creates a dataset. Returns the created Dataset resource on success.
+ rpc CreateDataset(CreateDatasetRequest) returns (Dataset) {
+ option (google.api.http) = {
+ post: "/v1beta1/{parent=projects/*}/datasets"
+ body: "*"
+ };
+ }
+
+  // Gets a dataset by resource name.
+ rpc GetDataset(GetDatasetRequest) returns (Dataset) {
+ option (google.api.http) = {
+ get: "/v1beta1/{name=projects/*/datasets/*}"
+ };
+ }
+
+ // Lists datasets under a project. Pagination is supported.
+ rpc ListDatasets(ListDatasetsRequest) returns (ListDatasetsResponse) {
+ option (google.api.http) = {
+ get: "/v1beta1/{parent=projects/*}/datasets"
+ };
+ }
+
+ // Deletes a dataset by resource name.
+ rpc DeleteDataset(DeleteDatasetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1beta1/{name=projects/*/datasets/*}"
+ };
+ }
+
+  // Imports data into a dataset based on the source locations defined in the
+  // request. It can be called multiple times for the same dataset. Each
+  // dataset can only have one long running operation at a time. For example,
+  // no labeling task (also a long running operation) can be started while an
+  // import is still ongoing, and vice versa.
+ rpc ImportData(ImportDataRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1beta1/{name=projects/*/datasets/*}:importData"
+ body: "*"
+ };
+ }
+
+  // Exports data and annotations from a dataset.
+ rpc ExportData(ExportDataRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1beta1/{name=projects/*/datasets/*}:exportData"
+ body: "*"
+ };
+ }
+
+  // Gets a data item in a dataset by resource name. This API can be
+  // called after data are imported into the dataset.
+ rpc GetDataItem(GetDataItemRequest) returns (DataItem) {
+ option (google.api.http) = {
+ get: "/v1beta1/{name=projects/*/datasets/*/dataItems/*}"
+ };
+ }
+
+  // Lists data items in a dataset. This API can be called after data
+  // are imported into the dataset. Pagination is supported.
+ rpc ListDataItems(ListDataItemsRequest) returns (ListDataItemsResponse) {
+ option (google.api.http) = {
+ get: "/v1beta1/{parent=projects/*/datasets/*}/dataItems"
+ };
+ }
+
+ // Gets an annotated dataset by resource name.
+ rpc GetAnnotatedDataset(GetAnnotatedDatasetRequest) returns (AnnotatedDataset) {
+ option (google.api.http) = {
+ get: "/v1beta1/{name=projects/*/datasets/*/annotatedDatasets/*}"
+ };
+ }
+
+ // Lists annotated datasets for a dataset. Pagination is supported.
+ rpc ListAnnotatedDatasets(ListAnnotatedDatasetsRequest) returns (ListAnnotatedDatasetsResponse) {
+ option (google.api.http) = {
+ get: "/v1beta1/{parent=projects/*/datasets/*}/annotatedDatasets"
+ };
+ }
+
+ // Deletes an annotated dataset by resource name.
+ rpc DeleteAnnotatedDataset(DeleteAnnotatedDatasetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1beta1/{name=projects/*/datasets/*/annotatedDatasets/*}"
+ };
+ }
+
+  // Starts a labeling task for images. The type of image labeling task is
+  // configured by the feature field in the request.
+ rpc LabelImage(LabelImageRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1beta1/{parent=projects/*/datasets/*}/image:label"
+ body: "*"
+ };
+ }
+
+  // Starts a labeling task for videos. The type of video labeling task is
+  // configured by the feature field in the request.
+ rpc LabelVideo(LabelVideoRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1beta1/{parent=projects/*/datasets/*}/video:label"
+ body: "*"
+ };
+ }
+
+  // Starts a labeling task for text. The type of text labeling task is
+  // configured by the feature field in the request.
+ rpc LabelText(LabelTextRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1beta1/{parent=projects/*/datasets/*}/text:label"
+ body: "*"
+ };
+ }
+
+  // Starts a labeling task for audio. The type of audio labeling task is
+  // configured by the feature field in the request.
+ rpc LabelAudio(LabelAudioRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1beta1/{parent=projects/*/datasets/*}/audio:label"
+ body: "*"
+ };
+ }
+
+ // Gets an example by resource name, including both data and annotation.
+ rpc GetExample(GetExampleRequest) returns (Example) {
+ option (google.api.http) = {
+ get: "/v1beta1/{name=projects/*/datasets/*/annotatedDatasets/*/examples/*}"
+ };
+ }
+
+ // Lists examples in an annotated dataset. Pagination is supported.
+ rpc ListExamples(ListExamplesRequest) returns (ListExamplesResponse) {
+ option (google.api.http) = {
+ get: "/v1beta1/{parent=projects/*/datasets/*/annotatedDatasets/*}/examples"
+ };
+ }
+
+ // Creates an annotation spec set by providing a set of labels.
+ rpc CreateAnnotationSpecSet(CreateAnnotationSpecSetRequest) returns (AnnotationSpecSet) {
+ option (google.api.http) = {
+ post: "/v1beta1/{parent=projects/*}/annotationSpecSets"
+ body: "*"
+ };
+ }
+
+ // Gets an annotation spec set by resource name.
+ rpc GetAnnotationSpecSet(GetAnnotationSpecSetRequest) returns (AnnotationSpecSet) {
+ option (google.api.http) = {
+ get: "/v1beta1/{name=projects/*/annotationSpecSets/*}"
+ };
+ }
+
+ // Lists annotation spec sets for a project. Pagination is supported.
+ rpc ListAnnotationSpecSets(ListAnnotationSpecSetsRequest) returns (ListAnnotationSpecSetsResponse) {
+ option (google.api.http) = {
+ get: "/v1beta1/{parent=projects/*}/annotationSpecSets"
+ };
+ }
+
+ // Deletes an annotation spec set by resource name.
+ rpc DeleteAnnotationSpecSet(DeleteAnnotationSpecSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1beta1/{name=projects/*/annotationSpecSets/*}"
+ };
+ }
+
+ // Creates an instruction for how data should be labeled.
+ rpc CreateInstruction(CreateInstructionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1beta1/{parent=projects/*}/instructions"
+ body: "*"
+ };
+ }
+
+ // Gets an instruction by resource name.
+ rpc GetInstruction(GetInstructionRequest) returns (Instruction) {
+ option (google.api.http) = {
+ get: "/v1beta1/{name=projects/*/instructions/*}"
+ };
+ }
+
+ // Lists instructions for a project. Pagination is supported.
+ rpc ListInstructions(ListInstructionsRequest) returns (ListInstructionsResponse) {
+ option (google.api.http) = {
+ get: "/v1beta1/{parent=projects/*}/instructions"
+ };
+ }
+
+ // Deletes an instruction object by resource name.
+ rpc DeleteInstruction(DeleteInstructionRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1beta1/{name=projects/*/instructions/*}"
+ };
+ }
+}
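
Each RPC above is exposed over REST through its google.api.http option. As a rough sketch of that mapping, using only the URL patterns declared in the service (the host name is an assumption, and the project and dataset IDs are placeholders):

    # REST paths implied by the google.api.http annotations above.
    API_ROOT = "https://datalabeling.googleapis.com"  # assumed endpoint

    parent = "projects/my-project"
    dataset = parent + "/datasets/my-dataset"

    # CreateDataset: POST /v1beta1/{parent=projects/*}/datasets, body = Dataset
    create_dataset_url = f"{API_ROOT}/v1beta1/{parent}/datasets"

    # ImportData: POST /v1beta1/{name=projects/*/datasets/*}:importData
    import_data_url = f"{API_ROOT}/v1beta1/{dataset}:importData"

    # LabelImage: POST /v1beta1/{parent=projects/*/datasets/*}/image:label
    label_image_url = f"{API_ROOT}/v1beta1/{dataset}/image:label"

    print(create_dataset_url, import_data_url, label_image_url, sep="\n")
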
+
+// Request message for CreateDataset.
+message CreateDatasetRequest {
+ // Required. Dataset resource parent, format:
+ // projects/{project_id}
+ string parent = 1;
+
+ // Required. The dataset to be created.
+ Dataset dataset = 2;
+}
+
+// Request message for GetDataset.
+message GetDatasetRequest {
+ // Required. Dataset resource name, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string name = 1;
+}
+
+// Request message for ListDatasets.
+message ListDatasetsRequest {
+ // Required. Dataset resource parent, format:
+ // projects/{project_id}
+ string parent = 1;
+
+  // Optional. Filtering on datasets is not supported at this moment.
+ string filter = 2;
+
+ // Optional. Requested page size. Server may return fewer results than
+ // requested. Default value is 100.
+ int32 page_size = 3;
+
+ // Optional. A token identifying a page of results for the server to return.
+ // Typically obtained by
+ // [ListDatasetsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListDatasetsResponse.next_page_token] of the previous
+ // [DataLabelingService.ListDatasets] call.
+ // Returns the first page if empty.
+ string page_token = 4;
+}
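
The paging contract is the usual one: pass page_size plus the page_token returned as next_page_token on the previous response, and stop when next_page_token comes back empty. A hedged sketch of the client-side loop (list_datasets here is a stand-in for whatever function issues the RPC):

    # Iterate over every dataset, following next_page_token until it is empty.
    def list_all_datasets(list_datasets, parent, page_size=100):
        page_token = ""  # empty token requests the first page
        while True:
            response = list_datasets(parent=parent, page_size=page_size,
                                     page_token=page_token)
            yield from response.datasets
            page_token = response.next_page_token
            if not page_token:  # no further pages
                break
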
+
+// Results of listing datasets within a project.
+message ListDatasetsResponse {
+ // The list of datasets to return.
+ repeated Dataset datasets = 1;
+
+ // A token to retrieve next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for DeleteDataset.
+message DeleteDatasetRequest {
+ // Required. Dataset resource name, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string name = 1;
+}
+
+// Request message for ImportData API.
+message ImportDataRequest {
+ // Required. Dataset resource name, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string name = 1;
+
+ // Required. Specify the input source of the data.
+ InputConfig input_config = 2;
+}
+
+// Request message for ExportData API.
+message ExportDataRequest {
+ // Required. Dataset resource name, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string name = 1;
+
+  // Required. Annotated dataset resource name. The DataItems in the
+  // Dataset and their annotations in the specified annotated dataset will be
+  // exported. It's in the format
+  // projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
+  // {annotated_dataset_id}
+ string annotated_dataset = 2;
+
+ // Optional. Filter is not supported at this moment.
+ string filter = 3;
+
+ // Required. Specify the output destination.
+ OutputConfig output_config = 4;
+}
+
+// Request message for GetDataItem.
+message GetDataItemRequest {
+ // Required. The name of the data item to get, format:
+ // projects/{project_id}/datasets/{dataset_id}/dataItems/{data_item_id}
+ string name = 1;
+}
+
+// Request message for ListDataItems.
+message ListDataItemsRequest {
+ // Required. Name of the dataset to list data items, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string parent = 1;
+
+ // Optional. Filter is not supported at this moment.
+ string filter = 2;
+
+ // Optional. Requested page size. Server may return fewer results than
+ // requested. Default value is 100.
+ int32 page_size = 3;
+
+ // Optional. A token identifying a page of results for the server to return.
+ // Typically obtained by
+ // [ListDataItemsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListDataItemsResponse.next_page_token] of the previous
+ // [DataLabelingService.ListDataItems] call.
+ // Return first page if empty.
+ string page_token = 4;
+}
+
+// Results of listing data items in a dataset.
+message ListDataItemsResponse {
+ // The list of data items to return.
+ repeated DataItem data_items = 1;
+
+ // A token to retrieve next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for GetAnnotatedDataset.
+message GetAnnotatedDatasetRequest {
+ // Required. Name of the annotated dataset to get, format:
+ // projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
+ // {annotated_dataset_id}
+ string name = 1;
+}
+
+// Request message for ListAnnotatedDatasets.
+message ListAnnotatedDatasetsRequest {
+ // Required. Name of the dataset to list annotated datasets, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string parent = 1;
+
+ // Optional. Filter is not supported at this moment.
+ string filter = 2;
+
+ // Optional. Requested page size. Server may return fewer results than
+ // requested. Default value is 100.
+ int32 page_size = 3;
+
+ // Optional. A token identifying a page of results for the server to return.
+ // Typically obtained by
+ // [ListAnnotatedDatasetsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListAnnotatedDatasetsResponse.next_page_token] of the previous
+ // [DataLabelingService.ListAnnotatedDatasets] call.
+ // Return first page if empty.
+ string page_token = 4;
+}
+
+// Request message for DeleteAnnotatedDataset.
+message DeleteAnnotatedDatasetRequest {
+ // Required. Name of the annotated dataset to delete, format:
+ // projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
+ // {annotated_dataset_id}
+ string name = 1;
+}
+
+// Results of listing annotated datasets for a dataset.
+message ListAnnotatedDatasetsResponse {
+ // The list of annotated datasets to return.
+ repeated AnnotatedDataset annotated_datasets = 1;
+
+ // A token to retrieve next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for starting an image labeling task.
+message LabelImageRequest {
+ // Image labeling task feature.
+ enum Feature {
+ FEATURE_UNSPECIFIED = 0;
+
+    // Label the whole image with one or more labels.
+ CLASSIFICATION = 1;
+
+ // Label image with bounding boxes for labels.
+ BOUNDING_BOX = 2;
+
+    // Label oriented bounding boxes. The boxes do not have to be parallel to
+    // the horizontal line.
+ ORIENTED_BOUNDING_BOX = 6;
+
+ // Label images with bounding poly. A bounding poly is a plane figure that
+ // is bounded by a finite chain of straight line segments closing in a loop.
+ BOUNDING_POLY = 3;
+
+    // Label images with polylines. A polyline is formed by connected line
+    // segments that do not form a closed shape.
+ POLYLINE = 4;
+
+    // Label images with segmentation. Segmentation differs from bounding
+    // poly in that it is a more fine-grained, pixel-level annotation.
+ SEGMENTATION = 5;
+ }
+
+ // Required. Config for labeling tasks. The type of request config must
+ // match the selected feature.
+ oneof request_config {
+ // Configuration for image classification task.
+ // One of image_classification_config, bounding_poly_config,
+ // polyline_config and segmentation_config is required.
+ ImageClassificationConfig image_classification_config = 4;
+
+ // Configuration for bounding box and bounding poly task.
+ // One of image_classification_config, bounding_poly_config,
+ // polyline_config and segmentation_config is required.
+ BoundingPolyConfig bounding_poly_config = 5;
+
+ // Configuration for polyline task.
+ // One of image_classification_config, bounding_poly_config,
+ // polyline_config and segmentation_config is required.
+ PolylineConfig polyline_config = 6;
+
+ // Configuration for segmentation task.
+ // One of image_classification_config, bounding_poly_config,
+ // polyline_config and segmentation_config is required.
+ SegmentationConfig segmentation_config = 7;
+ }
+
+ // Required. Name of the dataset to request labeling task, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string parent = 1;
+
+ // Required. Basic human annotation config.
+ HumanAnnotationConfig basic_config = 2;
+
+ // Required. The type of image labeling task.
+ Feature feature = 3;
+}
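
The feature value and the populated request_config entry have to agree; for instance, a BOUNDING_BOX task carries a bounding_poly_config. An illustrative Python dict with that pairing (the nested config contents are left empty because their fields are defined in files not shown here):

    # Shape of a LabelImageRequest asking for bounding boxes.
    label_image_request = {
        "parent": "projects/my-project/datasets/my-dataset",
        "basic_config": {},          # HumanAnnotationConfig (human_annotation_config.proto)
        "feature": "BOUNDING_BOX",
        "bounding_poly_config": {},  # must match the BOUNDING_BOX feature
    }
    print(label_image_request["feature"])
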
+
+// Request message for LabelVideo.
+message LabelVideoRequest {
+ // Video labeling task feature.
+ enum Feature {
+ FEATURE_UNSPECIFIED = 0;
+
+    // Label the whole video or a video segment with one or more labels.
+ CLASSIFICATION = 1;
+
+    // Label objects with bounding boxes on image frames extracted from the video.
+ OBJECT_DETECTION = 2;
+
+ // Label and track objects in video.
+ OBJECT_TRACKING = 3;
+
+ // Label the range of video for the specified events.
+ EVENT = 4;
+ }
+
+ // Required. Config for labeling tasks. The type of request config must
+ // match the selected feature.
+ oneof request_config {
+ // Configuration for video classification task.
+ // One of video_classification_config, object_detection_config,
+ // object_tracking_config and event_config is required.
+ VideoClassificationConfig video_classification_config = 4;
+
+ // Configuration for video object detection task.
+ // One of video_classification_config, object_detection_config,
+ // object_tracking_config and event_config is required.
+ ObjectDetectionConfig object_detection_config = 5;
+
+ // Configuration for video object tracking task.
+ // One of video_classification_config, object_detection_config,
+ // object_tracking_config and event_config is required.
+ ObjectTrackingConfig object_tracking_config = 6;
+
+ // Configuration for video event task.
+ // One of video_classification_config, object_detection_config,
+ // object_tracking_config and event_config is required.
+ EventConfig event_config = 7;
+ }
+
+ // Required. Name of the dataset to request labeling task, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string parent = 1;
+
+ // Required. Basic human annotation config.
+ HumanAnnotationConfig basic_config = 2;
+
+ // Required. The type of video labeling task.
+ Feature feature = 3;
+}
+
+// Request message for LabelText.
+message LabelTextRequest {
+ // Text labeling task feature.
+ enum Feature {
+ FEATURE_UNSPECIFIED = 0;
+
+    // Label text content with one or more labels.
+ TEXT_CLASSIFICATION = 1;
+
+ // Label entities and their span in text.
+ TEXT_ENTITY_EXTRACTION = 2;
+ }
+
+ // Required. Config for labeling tasks. The type of request config must
+ // match the selected feature.
+ oneof request_config {
+ // Configuration for text classification task.
+ // One of text_classification_config and text_entity_extraction_config
+ // is required.
+ TextClassificationConfig text_classification_config = 4;
+
+ // Configuration for entity extraction task.
+ // One of text_classification_config and text_entity_extraction_config
+ // is required.
+ TextEntityExtractionConfig text_entity_extraction_config = 5;
+ }
+
+  // Required. Name of the dataset to request labeling task, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string parent = 1;
+
+ // Required. Basic human annotation config.
+ HumanAnnotationConfig basic_config = 2;
+
+ // Required. The type of text labeling task.
+ Feature feature = 6;
+}
+
+// Request message for LabelAudio.
+message LabelAudioRequest {
+ // Audio labeling task feature.
+ enum Feature {
+ FEATURE_UNSPECIFIED = 0;
+
+    // Transcribe the audio into text.
+ AUDIO_TRANSCRIPTION = 1;
+ }
+
+ // Required. Name of the dataset to request labeling task, format:
+ // projects/{project_id}/datasets/{dataset_id}
+ string parent = 1;
+
+ // Required. Basic human annotation config.
+ HumanAnnotationConfig basic_config = 2;
+
+ // Required. The type of audio labeling task.
+ Feature feature = 3;
+}
+
+// Request message for GetExample.
+message GetExampleRequest {
+ // Required. Name of example, format:
+ // projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
+ // {annotated_dataset_id}/examples/{example_id}
+ string name = 1;
+
+ // Optional. An expression for filtering Examples. Filter by
+ // annotation_spec.display_name is supported. Format
+ // "annotation_spec.display_name = {display_name}"
+ string filter = 2;
+}
+
+// Request message for ListExamples.
+message ListExamplesRequest {
+ // Required. Example resource parent.
+ string parent = 1;
+
+  // Optional. An expression for filtering Examples. For annotated datasets
+  // that have an annotation spec set, filtering by
+ // annotation_spec.display_name is supported. Format
+ // "annotation_spec.display_name = {display_name}"
+ string filter = 2;
+
+ // Optional. Requested page size. Server may return fewer results than
+ // requested. Default value is 100.
+ int32 page_size = 3;
+
+ // Optional. A token identifying a page of results for the server to return.
+ // Typically obtained by
+ // [ListExamplesResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListExamplesResponse.next_page_token] of the previous
+ // [DataLabelingService.ListExamples] call.
+ // Return first page if empty.
+ string page_token = 4;
+}
+
+// Results of listing Examples in an annotated dataset.
+message ListExamplesResponse {
+ // The list of examples to return.
+ repeated Example examples = 1;
+
+ // A token to retrieve next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for CreateAnnotationSpecSet.
+message CreateAnnotationSpecSetRequest {
+ // Required. AnnotationSpecSet resource parent, format:
+ // projects/{project_id}
+ string parent = 1;
+
+  // Required. Annotation spec set to create. Annotation specs must be
+  // included. Only one annotation spec will be accepted for annotation specs
+  // with the same display_name.
+ AnnotationSpecSet annotation_spec_set = 2;
+}
+
+// Request message for GetAnnotationSpecSet.
+message GetAnnotationSpecSetRequest {
+ // Required. AnnotationSpecSet resource name, format:
+ // projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}
+ string name = 1;
+}
+
+// Request message for ListAnnotationSpecSets.
+message ListAnnotationSpecSetsRequest {
+ // Required. Parent of AnnotationSpecSet resource, format:
+ // projects/{project_id}
+ string parent = 1;
+
+ // Optional. Filter is not supported at this moment.
+ string filter = 2;
+
+ // Optional. Requested page size. Server may return fewer results than
+ // requested. Default value is 100.
+ int32 page_size = 3;
+
+ // Optional. A token identifying a page of results for the server to return.
+ // Typically obtained by
+ // [ListAnnotationSpecSetsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListAnnotationSpecSetsResponse.next_page_token] of the previous
+ // [DataLabelingService.ListAnnotationSpecSets] call.
+ // Return first page if empty.
+ string page_token = 4;
+}
+
+// Results of listing annotation spec sets under a project.
+message ListAnnotationSpecSetsResponse {
+ // The list of annotation spec sets.
+ repeated AnnotationSpecSet annotation_spec_sets = 1;
+
+ // A token to retrieve next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for DeleteAnnotationSpecSet.
+message DeleteAnnotationSpecSetRequest {
+  // Required. AnnotationSpecSet resource name, format:
+ // `projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}`.
+ string name = 1;
+}
+
+// Request message for CreateInstruction.
+message CreateInstructionRequest {
+ // Required. Instruction resource parent, format:
+ // projects/{project_id}
+ string parent = 1;
+
+ // Required. Instruction of how to perform the labeling task.
+ Instruction instruction = 2;
+}
+
+// Request message for GetInstruction.
+message GetInstructionRequest {
+ // Required. Instruction resource name, format:
+ // projects/{project_id}/instructions/{instruction_id}
+ string name = 1;
+}
+
+// Request message for DeleteInstruction.
+message DeleteInstructionRequest {
+ // Required. Instruction resource name, format:
+ // projects/{project_id}/instructions/{instruction_id}
+ string name = 1;
+}
+
+// Request message for ListInstructions.
+message ListInstructionsRequest {
+ // Required. Instruction resource parent, format:
+ // projects/{project_id}
+ string parent = 1;
+
+ // Optional. Filter is not supported at this moment.
+ string filter = 2;
+
+ // Optional. Requested page size. Server may return fewer results than
+ // requested. Default value is 100.
+ int32 page_size = 3;
+
+ // Optional. A token identifying a page of results for the server to return.
+ // Typically obtained by
+ // [ListInstructionsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListInstructionsResponse.next_page_token] of the previous
+ // [DataLabelingService.ListInstructions] call.
+ // Return first page if empty.
+ string page_token = 4;
+}
+
+// Results of listing instructions under a project.
+message ListInstructionsResponse {
+ // The list of Instructions to return.
+ repeated Instruction instructions = 1;
+
+ // A token to retrieve next page of results.
+ string next_page_token = 2;
+}
diff --git a/google/cloud/datalabeling/v1beta1/datalabeling_gapic.yaml b/google/cloud/datalabeling/v1beta1/datalabeling_gapic.yaml
new file mode 100644
index 000000000..36197fdd8
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/datalabeling_gapic.yaml
@@ -0,0 +1,540 @@
+type: com.google.api.codegen.ConfigProto
+config_schema_version: 1.0.0
+# The settings of generated code in a specific language.
+language_settings:
+ java:
+ package_name: com.google.cloud.datalabeling.v1beta1
+ python:
+ package_name: google.cloud.datalabeling_v1beta1.gapic
+ go:
+ package_name: cloud.google.com/go/datalabeling/apiv1beta1
+ csharp:
+ package_name: Google.Cloud.Datalabeling.V1beta1
+ ruby:
+ package_name: Google::Cloud::Datalabeling::V1beta1
+ php:
+ package_name: Google\Cloud\Datalabeling\V1beta1
+ nodejs:
+ package_name: datalabeling.v1beta1
+ domain_layer_location: google-cloud
+# The configuration for the license header to put on generated files.
+license_header:
+ # The file containing the raw license header without any copyright line(s).
+ license_file: license-header-apache-2.0.txt
+# A list of API interface configurations.
+interfaces:
+# The fully qualified name of the API interface.
+- name: google.cloud.datalabeling.v1beta1.DataLabelingService
+ # A list of resource collection configurations.
+ # Consists of a name_pattern and an entity_name.
+ # The name_pattern is a pattern to describe the names of the resources of this
+ # collection, using the platform's conventions for URI patterns. A generator
+ # may use this to generate methods to compose and decompose such names. The
+ # pattern should use named placeholders as in `shelves/{shelf}/books/{book}`;
+ # those will be taken as hints for the parameter names of the generated
+ # methods. If empty, no name methods are generated.
+ # The entity_name is the name to be used as a basis for generated methods and
+ # classes.
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/annotationSpecSets/{annotation_spec_set}
+ entity_name: annotation_spec_set
+ - name_pattern: projects/{project}/datasets/{dataset}
+ entity_name: dataset
+ - name_pattern: projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}
+ entity_name: annotated_dataset
+ - name_pattern: projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}/examples/{example}
+ entity_name: example
+ - name_pattern: projects/{project}/datasets/{dataset}/dataItems/{data_item}
+ entity_name: data_item
+ - name_pattern: projects/{project}/instructions/{instruction}
+ entity_name: instruction
+ # Definition for retryable codes.
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - DEADLINE_EXCEEDED
+ - UNAVAILABLE
+ - name: non_idempotent
+ retry_codes: []
+ # Definition for retry/backoff parameters.
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 30000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 300000
+ # A list of method configurations.
+ # Common properties:
+ #
+ # name - The simple name of the method.
+ #
+ # flattening - Specifies the configuration for parameter flattening.
+ # Describes the parameter groups for which a generator should produce method
+ # overloads which allow a client to directly pass request message fields as
+ # method parameters. This information may or may not be used, depending on
+ # the target language.
+ # Consists of groups, which each represent a list of parameters to be
+ # flattened. Each parameter listed must be a field of the request message.
+ #
+ # required_fields - Fields that are always required for a request to be
+ # valid.
+ #
+ # resource_name_treatment - An enum that specifies how to treat the resource
+ # name formats defined in the field_name_patterns and
+ # response_field_name_patterns fields.
+ # UNSET: default value
+ # NONE: the collection configs will not be used by the generated code.
+ # VALIDATE: string fields will be validated by the client against the
+ # specified resource name formats.
+ # STATIC_TYPES: the client will use generated types for resource names.
+ #
+ # page_streaming - Specifies the configuration for paging.
+ # Describes information for generating a method which transforms a paging
+ # list RPC into a stream of resources.
+ # Consists of a request and a response.
+ # The request specifies request information of the list method. It defines
+ # which fields match the paging pattern in the request. The request consists
+ # of a page_size_field and a token_field. The page_size_field is the name of
+ # the optional field specifying the maximum number of elements to be
+ # returned in the response. The token_field is the name of the field in the
+ # request containing the page token.
+ # The response specifies response information of the list method. It defines
+ # which fields match the paging pattern in the response. The response
+ # consists of a token_field and a resources_field. The token_field is the
+ # name of the field in the response containing the next page token. The
+ # resources_field is the name of the field in the response containing the
+ # list of resources belonging to the page.
+ #
+ # retry_codes_name - Specifies the configuration for retryable codes. The
+ # name must be defined in interfaces.retry_codes_def.
+ #
+ # retry_params_name - Specifies the configuration for retry/backoff
+ # parameters. The name must be defined in interfaces.retry_params_def.
+ #
+ # field_name_patterns - Maps the field name of the request type to
+ # entity_name of interfaces.collections.
+ # Specifies the string pattern that the field must follow.
+ #
+ # timeout_millis - Specifies the default timeout for a non-retrying call. If
+ # the call is retrying, refer to retry_params_name instead.
+ methods:
+ - name: CreateDataset
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - dataset
+ required_fields:
+ - parent
+ - dataset
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ - name: GetDataset
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: dataset
+ timeout_millis: 30000
+ - name: ListDatasets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - filter
+ required_fields:
+ - parent
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: datasets
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ - name: DeleteDataset
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: dataset
+ timeout_millis: 30000
+ - name: ImportData
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - input_config
+ required_fields:
+ - name
+ - input_config
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: dataset
+ long_running:
+ return_type: google.cloud.datalabeling.v1beta1.ImportDataOperationResponse
+ metadata_type: google.cloud.datalabeling.v1beta1.ImportDataOperationMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 30000
+ - name: ExportData
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - annotated_dataset
+ - filter
+ - output_config
+ required_fields:
+ - name
+ - annotated_dataset
+ - output_config
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: dataset
+ long_running:
+ return_type: google.cloud.datalabeling.v1beta1.ExportDataOperationResponse
+ metadata_type: google.cloud.datalabeling.v1beta1.ExportDataOperationMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 30000
+ - name: GetDataItem
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: data_item
+ timeout_millis: 30000
+ - name: ListDataItems
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - filter
+ required_fields:
+ - parent
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: data_items
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: dataset
+ timeout_millis: 30000
+ - name: GetAnnotatedDataset
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: annotated_dataset
+ timeout_millis: 30000
+ - name: ListAnnotatedDatasets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - filter
+ required_fields:
+ - parent
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: annotated_datasets
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: dataset
+ timeout_millis: 30000
+ - name: LabelImage
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - basic_config
+ - feature
+ required_fields:
+ - parent
+ - basic_config
+ - feature
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: dataset
+ long_running:
+ return_type: google.cloud.datalabeling.v1beta1.AnnotatedDataset
+ metadata_type: google.cloud.datalabeling.v1beta1.LabelOperationMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 30000
+ - name: LabelVideo
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - basic_config
+ - feature
+ required_fields:
+ - parent
+ - basic_config
+ - feature
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: dataset
+ long_running:
+ return_type: google.cloud.datalabeling.v1beta1.AnnotatedDataset
+ metadata_type: google.cloud.datalabeling.v1beta1.LabelOperationMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 30000
+ - name: LabelText
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - basic_config
+ - feature
+ required_fields:
+ - parent
+ - basic_config
+ - feature
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: dataset
+ long_running:
+ return_type: google.cloud.datalabeling.v1beta1.AnnotatedDataset
+ metadata_type: google.cloud.datalabeling.v1beta1.LabelOperationMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 30000
+ - name: LabelAudio
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - basic_config
+ - feature
+ required_fields:
+ - parent
+ - basic_config
+ - feature
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: dataset
+ long_running:
+ return_type: google.cloud.datalabeling.v1beta1.AnnotatedDataset
+ metadata_type: google.cloud.datalabeling.v1beta1.LabelOperationMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 30000
+ - name: GetExample
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - filter
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: example
+ timeout_millis: 30000
+ - name: ListExamples
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - filter
+ required_fields:
+ - parent
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: examples
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: annotated_dataset
+ timeout_millis: 30000
+ - name: CreateAnnotationSpecSet
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - annotation_spec_set
+ required_fields:
+ - parent
+ - annotation_spec_set
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ - name: GetAnnotationSpecSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: annotation_spec_set
+ timeout_millis: 30000
+ - name: ListAnnotationSpecSets
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - filter
+ required_fields:
+ - parent
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: annotation_spec_sets
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ - name: DeleteAnnotationSpecSet
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: annotation_spec_set
+ timeout_millis: 30000
+ - name: CreateInstruction
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - instruction
+ required_fields:
+ - parent
+ - instruction
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ long_running:
+ return_type: google.cloud.datalabeling.v1beta1.Instruction
+ metadata_type: google.cloud.datalabeling.v1beta1.CreateInstructionMetadata
+ initial_poll_delay_millis: 500
+ poll_delay_multiplier: 1.5
+ max_poll_delay_millis: 5000
+ total_poll_timeout_millis: 300000
+ timeout_millis: 30000
+ - name: GetInstruction
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: instruction
+ timeout_millis: 30000
+ - name: ListInstructions
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - filter
+ required_fields:
+ - parent
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: instructions
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ - name: DeleteInstruction
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: instruction
+ timeout_millis: 30000
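
The "default" retry_params_def in this config amounts to an exponential backoff: delays start at 100 ms, grow by 1.3x per attempt, and are capped at 30 s, with the whole sequence bounded by total_timeout_millis. A small Python sketch of the schedule those numbers imply:

    # Retry delay schedule implied by the "default" retry parameters above.
    def backoff_schedule(initial_ms=100, multiplier=1.3, max_ms=30000, attempts=10):
        delay = initial_ms
        for _ in range(attempts):
            yield min(delay, max_ms)
            delay *= multiplier

    print([round(d) for d in backoff_schedule()])
    # first few delays: 100, 130, 169, 220, ... ms, capped at 30000 ms
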
diff --git a/google/cloud/datalabeling/v1beta1/dataset.proto b/google/cloud/datalabeling/v1beta1/dataset.proto
new file mode 100644
index 000000000..952daf38d
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/dataset.proto
@@ -0,0 +1,306 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.datalabeling.v1beta1;
+
+import "google/cloud/datalabeling/v1beta1/annotation.proto";
+import "google/cloud/datalabeling/v1beta1/human_annotation_config.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1;datalabeling";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.datalabeling.v1beta1";
+
+
+enum DataType {
+ DATA_TYPE_UNSPECIFIED = 0;
+
+ IMAGE = 1;
+
+ VIDEO = 2;
+
+ TEXT = 4;
+
+ AUDIO = 5;
+}
+
+// Dataset is the resource to hold your data. You can request multiple labeling
+// tasks for a dataset, and each one will generate an AnnotatedDataset.
+message Dataset {
+ // Output only.
+ // Dataset resource name, format is:
+ // projects/{project_id}/datasets/{dataset_id}
+ string name = 1;
+
+ // Required. The display name of the dataset. Maximum of 64 characters.
+ string display_name = 2;
+
+  // Optional. User-provided description of the dataset.
+  // The description can be up to 10000 characters long.
+ string description = 3;
+
+  // Output only. Time the dataset was created.
+ google.protobuf.Timestamp create_time = 4;
+
+  // Output only. This is populated with the original input configs with which
+  // ImportData was called. It is available only after clients import data
+  // into this dataset.
+ repeated InputConfig input_configs = 5;
+}
+
+// The configuration of input data, including data type, location, etc.
+message InputConfig {
+ // Required. Where the data is from.
+ oneof source {
+ GcsSource gcs_source = 2;
+ }
+
+  // Required. Data type must be specified when the user tries to import data.
+ DataType data_type = 1;
+}
+
+// Source of the GCS file to be imported. Only a GCS path is allowed in
+// input_uri.
+message GcsSource {
+ // Required. The input uri of source file.
+ string input_uri = 1;
+
+ // Required. The format of the gcs source. Only "text/csv" is supported.
+ string mime_type = 2;
+}
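
Putting the two messages together, an ImportData call carries an InputConfig whose gcs_source points at a CSV manifest in Cloud Storage; "text/csv" is the only supported format. An illustrative Python dict (the bucket and object names are placeholders):

    # InputConfig for importing an image dataset listed in a CSV file on GCS.
    input_config = {
        "data_type": "IMAGE",
        "gcs_source": {
            "input_uri": "gs://my-bucket/import/images.csv",  # placeholder URI
            "mime_type": "text/csv",
        },
    }
    print(input_config["gcs_source"]["input_uri"])
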
+
+// The configuration of output data.
+message OutputConfig {
+ // Required. Location to output data to.
+ oneof destination {
+ // Output to a GCS file. Should be used for labeling output other than Audio
+ // transcription.
+ GcsDestination gcs_destination = 1;
+
+ // Output to a GCS folder. Should be used for Audio transcription
+ // labeling output.
+ GcsFolderDestination gcs_folder_destination = 2;
+ }
+}
+
+// Export destination of the data. Only a GCS path is allowed in
+// output_uri.
+message GcsDestination {
+  // Required. The output uri of the destination file.
+  string output_uri = 1;
+
+  // Required. The format of the GCS destination. Only "text/csv" and
+  // "application/json" are supported.
+ string mime_type = 2;
+}
+
+// Export folder destination of the data.
+message GcsFolderDestination {
+ // Required. GCS folder to export data to.
+ string output_folder_uri = 1;
+}
+
+// DataItem is a piece of data, without annotation. For example, an image.
+message DataItem {
+ // Output only.
+ oneof payload {
+ // The image payload, a container of the image bytes/uri.
+ ImagePayload image_payload = 2;
+
+ // The text payload, a container of text content.
+ TextPayload text_payload = 3;
+
+ // The video payload, a container of the video uri.
+ VideoPayload video_payload = 4;
+
+ // The audio payload, a container of the audio uri.
+ AudioPayload audio_payload = 5;
+ }
+
+ // Output only. Name of the data item, in format of:
+ // projects/{project_id}/datasets/{dataset_id}/dataItems/{data_item_id}
+ string name = 1;
+}
+
+// AnnotatedDataset is a set holding annotations for data in a Dataset. Each
+// labeling task will generate an AnnotatedDataset under the Dataset that the
+// task is requested for.
+message AnnotatedDataset {
+ // Output only.
+ // AnnotatedDataset resource name in format of:
+ // projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
+ // {annotated_dataset_id}
+ string name = 1;
+
+ // Output only. The display name of the AnnotatedDataset. It is specified in
+  // HumanAnnotationConfig when the user starts a labeling task. Maximum of 64
+ // characters.
+ string display_name = 2;
+
+ // Output only. The description of the AnnotatedDataset. It is specified in
+  // HumanAnnotationConfig when the user starts a labeling task. Maximum of 10000
+ // characters.
+ string description = 9;
+
+ // Output only. Source of the annotation.
+ AnnotationSource annotation_source = 3;
+
+  // Output only. Type of the annotation. It is specified when starting the
+  // labeling task.
+ AnnotationType annotation_type = 8;
+
+ // Output only. Number of examples in the annotated dataset.
+ int64 example_count = 4;
+
+  // Output only. Number of examples that have annotations in the annotated
+  // dataset.
+ int64 completed_example_count = 5;
+
+ // Output only. Per label statistics.
+ LabelStats label_stats = 6;
+
+ // Output only. Time the AnnotatedDataset was created.
+ google.protobuf.Timestamp create_time = 7;
+
+ // Output only. Additional information about AnnotatedDataset.
+ AnnotatedDatasetMetadata metadata = 10;
+}
+
+// Metadata on AnnotatedDataset.
+message AnnotatedDatasetMetadata {
+ // HumanAnnotationConfig used when requesting the human labeling task for this
+ // AnnotatedDataset.
+ HumanAnnotationConfig human_annotation_config = 1;
+
+ // Specific request configuration used when requesting the labeling task.
+ oneof annotation_request_config {
+ // Configuration for image classification task.
+ ImageClassificationConfig image_classification_config = 2;
+ // Configuration for image bounding box and bounding poly task.
+ BoundingPolyConfig bounding_poly_config = 3;
+ // Configuration for image polyline task.
+ PolylineConfig polyline_config = 4;
+ // Configuration for image segmentation task.
+ SegmentationConfig segmentation_config = 5;
+ // Configuration for video classification task.
+ VideoClassificationConfig video_classification_config = 6;
+ // Configuration for video object detection task.
+ ObjectDetectionConfig object_detection_config = 7;
+ // Configuration for video object tracking task.
+ ObjectTrackingConfig object_tracking_config = 8;
+ // Configuration for video event labeling task.
+ EventConfig event_config = 9;
+ // Configuration for text classification task.
+ TextClassificationConfig text_classification_config = 10;
+ // Configuration for text entity extraction task.
+ TextEntityExtractionConfig text_entity_extraction_config = 11;
+ }
+}
+
+// Statistics about annotation specs.
+message LabelStats {
+ // Map of each annotation spec's example count. Key is the annotation spec
+ // name and value is the number of examples for that annotation spec.
+ map<string, int64> example_count = 1;
+}
+
+// An Example is a piece of data and its annotation. For example, an image with
+// label "house".
+message Example {
+ // Output only. The data part of Example.
+ oneof payload {
+ // The image payload, a container of the image bytes/uri.
+ ImagePayload image_payload = 2;
+
+ // The text payload, a container of the text content.
+ TextPayload text_payload = 6;
+
+ // The video payload, a container of the video uri.
+ VideoPayload video_payload = 7;
+
+ // The audio payload, a container of the audio uri.
+ AudioPayload audio_payload = 8;
+ }
+
+ // Output only. Name of the example, in format of:
+ // projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
+ // {annotated_dataset_id}/examples/{example_id}
+ string name = 1;
+
+ // Output only. Annotations for the piece of data in Example.
+ // One piece of data can have multiple annotations.
+ repeated Annotation annotations = 5;
+}
+
+// Container of information about an image.
+message ImagePayload {
+ // Image format.
+ string mime_type = 1;
+
+ // A byte string of a full image.
+ bytes image_thumbnail = 2;
+
+ // Image uri from the user bucket.
+ string image_uri = 3;
+}
+
+// Container of information about a piece of text.
+message TextPayload {
+ // Text content.
+ string text_content = 1;
+}
+
+// Container of information about a video thumbnail.
+message VideoThumbnail {
+  // A byte string of the video frame.
+  bytes thumbnail = 1;
+
+  // Time offset relative to the beginning of the video, corresponding to the
+  // video frame from which the thumbnail has been extracted.
+ google.protobuf.Duration time_offset = 2;
+}
+
+// Container of information about a video.
+message VideoPayload {
+ // Video format.
+ string mime_type = 1;
+
+ // Video uri from the user bucket.
+ string video_uri = 2;
+
+ // The list of video thumbnails.
+ repeated VideoThumbnail video_thumbnails = 3;
+
+ // FPS of the video.
+ float frame_rate = 4;
+}
+
+// Container of information about an audio file.
+message AudioPayload {
+ // Audio uri in user bucket.
+ string audio_uri = 1;
+
+ // Sample rate in Hertz of the audio data sent in all
+ // `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV`
+ // audio files and required for all other audio formats. For details,
+ // see [AudioEncoding][google.cloud.datalabeling.v1beta1.AudioPayload.AudioEncoding].
+ int32 sample_rate_hertz = 3;
+}
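
The messages above map directly onto the JSON request bodies a client sends. As a
rough illustration only (bucket and file paths are placeholders, and the dict keys
simply mirror the proto field names from dataset.proto), an import and an export
configuration might look like this in Python:

    # Hypothetical sketch: dict equivalents of InputConfig and OutputConfig.
    # Keys mirror the proto fields above; all GCS paths are placeholders.
    input_config = {
        "data_type": "IMAGE",                      # DataType enum value
        "gcs_source": {
            "input_uri": "gs://my-bucket/manifests/images.csv",
            "mime_type": "text/csv",               # only "text/csv" is supported
        },
    }

    output_config = {
        "gcs_destination": {
            "output_uri": "gs://my-bucket/exports/labels.json",
            "mime_type": "application/json",       # or "text/csv"
        },
    }

Python GAPIC clients generally accept such dicts wherever the corresponding proto
message is expected, so the same shapes can be passed to ImportData and ExportData
requests.
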
diff --git a/google/cloud/datalabeling/v1beta1/human_annotation_config.proto b/google/cloud/datalabeling/v1beta1/human_annotation_config.proto
new file mode 100644
index 000000000..96672aeda
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/human_annotation_config.proto
@@ -0,0 +1,208 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.datalabeling.v1beta1;
+
+import "google/protobuf/duration.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1;datalabeling";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.datalabeling.v1beta1";
+
+
+enum StringAggregationType {
+ STRING_AGGREGATION_TYPE_UNSPECIFIED = 0;
+
+ // Majority vote to aggregate answers.
+ MAJORITY_VOTE = 1;
+
+ // Unanimous answers will be adopted.
+ UNANIMOUS_VOTE = 2;
+
+ // Preserve all answers by crowd compute.
+ NO_AGGREGATION = 3;
+}
+
+// Configuration for how the human labeling task should be done.
+message HumanAnnotationConfig {
+  // Required except for the LabelAudio case. Instruction resource name.
+ string instruction = 1;
+
+  // Required. A human-readable name for AnnotatedDataset defined by
+  // users. Maximum of 64 characters.
+ string annotated_dataset_display_name = 2;
+
+ // Optional. A human-readable description for AnnotatedDataset.
+ // The description can be up to 10000 characters long.
+ string annotated_dataset_description = 3;
+
+ // Optional. A human-readable label used to logically group labeling tasks.
+ // This string must match the regular expression `[a-zA-Z\\d_-]{0,128}`.
+ string label_group = 4;
+
+  // Optional. The language of this question, as a
+  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language code.
+  // Default value is en-US.
+  // You only need to set this when the task is language related, for example,
+  // French text classification or Chinese audio transcription.
+  string language_code = 5;
+
+  // Optional. Replication of questions. Each question will be sent to up to
+  // this number of contributors to label. Aggregated answers will be returned.
+  // Default is set to 1.
+  // For image-related labeling, valid values are 1, 3, and 5.
+ int32 replica_count = 6;
+
+ // Optional. Maximum duration for contributors to answer a question. Default
+ // is 1800 seconds.
+ google.protobuf.Duration question_duration = 7;
+
+ // Optional. If you want your own labeling contributors to manage and work on
+ // this labeling request, you can set these contributors here. We will give
+ // them access to the question types in crowdcompute. Note that these
+  // emails must be registered in the crowdcompute worker UI:
+ // https://crowd-compute.appspot.com/
+ repeated string contributor_emails = 9;
+}
+
+// Config for image classification human labeling task.
+message ImageClassificationConfig {
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 1;
+
+ // Optional. If allow_multi_label is true, contributors are able to choose
+ // multiple labels for one image.
+ bool allow_multi_label = 2;
+
+ // Optional. The type of how to aggregate answers.
+ StringAggregationType answer_aggregation_type = 3;
+}
+
+// Config for image bounding poly (and bounding box) human labeling task.
+message BoundingPolyConfig {
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 1;
+
+  // Optional. Instruction message shown on the contributors' UI.
+ string instruction_message = 2;
+}
+
+// Config for image polyline human labeling task.
+message PolylineConfig {
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 1;
+
+  // Optional. Instruction message shown on the contributors' UI.
+ string instruction_message = 2;
+}
+
+// Config for image segmentation human labeling task.
+message SegmentationConfig {
+  // Required. Annotation spec set resource name. Format:
+  // projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}
+  string annotation_spec_set = 1;
+
+  // Instruction message shown on the labelers' UI.
+  string instruction_message = 2;
+}
+
+// Config for video classification human labeling task.
+// Currently two types of video classification are supported:
+// 1. Assign labels on the entire video.
+// 2. Split the video into multiple video clips based on camera shot, and
+// assign labels on each video clip.
+message VideoClassificationConfig {
+  // Annotation spec set with the setting of whether or not to allow multiple labels.
+ message AnnotationSpecSetConfig {
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 1;
+
+ // Optional. If allow_multi_label is true, contributors are able to
+ // choose multiple labels from one annotation spec set.
+ bool allow_multi_label = 2;
+ }
+
+  // Required. The list of annotation spec set configs.
+  // Since watching a video clip takes much longer than viewing an image, we
+  // support labeling with multiple AnnotationSpecSets at the same time. Labels
+  // in each AnnotationSpecSet will be shown in a group to contributors.
+  // Contributors can select one or more labels (depending on whether multiple
+  // labels are allowed) from each group.
+ repeated AnnotationSpecSetConfig annotation_spec_set_configs = 1;
+
+ // Optional. Option to apply shot detection on the video.
+ bool apply_shot_detection = 2;
+}
+
+// Config for video object detection human labeling task.
+// Object detection will be conducted on the images extracted from the video,
+// and those objects will be labeled with bounding boxes.
+// Users need to specify the number of images to be extracted per second as the
+// extraction frame rate.
+message ObjectDetectionConfig {
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 1;
+
+  // Optional. Instruction message shown on the labelers' UI.
+ string instruction_message = 2;
+
+ // Required. Number of frames per second to be extracted from the video.
+ double extraction_frame_rate = 3;
+}
+
+// Config for video object tracking human labeling task.
+message ObjectTrackingConfig {
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 1;
+}
+
+// Config for video event human labeling task.
+message EventConfig {
+  // Required. The list of annotation spec set resource names. Similar to video
+  // classification, we support selecting events from multiple AnnotationSpecSets
+  // at the same time.
+ repeated string annotation_spec_sets = 1;
+}
+
+// Config for text classification human labeling task.
+message TextClassificationConfig {
+ // Optional. If allow_multi_label is true, contributors are able to choose
+ // multiple labels for one text segment.
+ bool allow_multi_label = 1;
+
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 2;
+
+ // Optional. Configs for sentiment selection.
+ SentimentConfig sentiment_config = 3;
+}
+
+// Config for setting up sentiments.
+message SentimentConfig {
+  // If set to true, contributors will have the option to select the sentiment
+  // of the label they selected, marking it as a negative or positive label.
+  // Default is false.
+ bool enable_label_sentiment_selection = 1;
+}
+
+// Config for text entity extraction human labeling task.
+message TextEntityExtractionConfig {
+ // Required. Annotation spec set resource name.
+ string annotation_spec_set = 1;
+}
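
As a concrete illustration of how these configs fit together, a labeling request
pairs one HumanAnnotationConfig with exactly one feature-specific config. The
sketch below uses plain Python dicts whose keys mirror the proto field names
above; every resource name and email address is a placeholder:

    # Hypothetical sketch: a basic config plus an image classification config.
    basic_config = {
        "instruction": "projects/my-project/instructions/my-instruction",
        "annotated_dataset_display_name": "flowers-round-1",   # <= 64 characters
        "annotated_dataset_description": "First labeling pass on the flower set.",
        "language_code": "en-US",       # only needed for language-related tasks
        "replica_count": 3,             # 1, 3, or 5 for image-related labeling
        "question_duration": {"seconds": 1800},
        "contributor_emails": ["labeler@example.com"],
    }

    feature_config = {
        "annotation_spec_set": "projects/my-project/annotationSpecSets/flowers",
        "allow_multi_label": False,
        "answer_aggregation_type": "MAJORITY_VOTE",   # StringAggregationType
    }
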
diff --git a/google/cloud/datalabeling/v1beta1/instruction.proto b/google/cloud/datalabeling/v1beta1/instruction.proto
new file mode 100644
index 000000000..d1a1e751c
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/instruction.proto
@@ -0,0 +1,78 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.datalabeling.v1beta1;
+
+import "google/cloud/datalabeling/v1beta1/dataset.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1;datalabeling";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.datalabeling.v1beta1";
+
+
+// Instruction on how to perform the labeling task for human operators.
+// Currently two types of instruction are supported - CSV file and PDF.
+// One of the two types must be provided.
+// CSV files are only supported for the image classification task. Instructions
+// for other tasks should be provided as PDF.
+// For image classification, CSV and PDF can be provided at the same time.
+message Instruction {
+ // Output only. Instruction resource name, format:
+ // projects/{project_id}/instructions/{instruction_id}
+ string name = 1;
+
+ // Required. The display name of the instruction. Maximum of 64 characters.
+ string display_name = 2;
+
+ // Optional. User-provided description of the instruction.
+ // The description can be up to 10000 characters long.
+ string description = 3;
+
+ // Output only. Creation time of instruction.
+ google.protobuf.Timestamp create_time = 4;
+
+ // Output only. Last update time of instruction.
+ google.protobuf.Timestamp update_time = 5;
+
+ // Required. The data type of this instruction.
+ DataType data_type = 6;
+
+  // One of CSV or PDF instruction is required.
+  // Instruction from a CSV file, such as for a classification task.
+  // The CSV file should have exactly two columns, in the following format:
+  // the first column is labeled data, such as an image reference or text;
+  // the second column is comma-separated labels associated with the data.
+ CsvInstruction csv_instruction = 7;
+
+  // One of CSV or PDF instruction is required.
+  // Instruction from a PDF document. The PDF document should be in a GCS bucket.
+ PdfInstruction pdf_instruction = 9;
+}
+
+// Instruction from a CSV file.
+message CsvInstruction {
+  // CSV file for the instruction. Only a GCS path is allowed.
+ string gcs_file_uri = 1;
+}
+
+// Instruction from a PDF file.
+message PdfInstruction {
+  // PDF file for the instruction. Only a GCS path is allowed.
+ string gcs_file_uri = 1;
+}
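
A short sketch of what the CSV side of an Instruction looks like in practice; the
GCS paths and display name are placeholders, and the comment shows the two-column
layout that csv_instruction expects:

    # Hypothetical sketch: an Instruction message expressed as a Python dict.
    # The referenced CSV has exactly two columns, for example:
    #   gs://my-bucket/images/0001.jpg,rose
    #   gs://my-bucket/images/0002.jpg,tulip
    instruction = {
        "display_name": "flower-labeling-guide",    # <= 64 characters
        "description": "How to choose among the flower labels.",
        "data_type": "IMAGE",
        "csv_instruction": {
            "gcs_file_uri": "gs://my-bucket/instructions/examples.csv",
        },
        "pdf_instruction": {
            "gcs_file_uri": "gs://my-bucket/instructions/guide.pdf",
        },
    }
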
diff --git a/google/cloud/datalabeling/v1beta1/operations.proto b/google/cloud/datalabeling/v1beta1/operations.proto
new file mode 100644
index 000000000..71815e531
--- /dev/null
+++ b/google/cloud/datalabeling/v1beta1/operations.proto
@@ -0,0 +1,206 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.datalabeling.v1beta1;
+
+import "google/cloud/datalabeling/v1beta1/dataset.proto";
+import "google/cloud/datalabeling/v1beta1/human_annotation_config.proto";
+import "google/cloud/datalabeling/v1beta1/instruction.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1;datalabeling";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.datalabeling.v1beta1";
+
+// Response used for ImportData longrunning operation.
+message ImportDataOperationResponse {
+  // Output only. The name of the imported dataset.
+  string dataset = 1;
+
+  // Output only. Total number of examples requested to import.
+ int32 total_count = 2;
+
+ // Output only. Number of examples imported successfully.
+ int32 import_count = 3;
+}
+
+// Response used for ExportDataset longrunning operation.
+message ExportDataOperationResponse {
+  // Output only. The name of the dataset.
+  // "projects/*/datasets/*/Datasets/*"
+  string dataset = 1;
+
+  // Output only. Total number of examples requested to export.
+ int32 total_count = 2;
+
+ // Output only. Number of examples exported successfully.
+ int32 export_count = 3;
+
+  // Output only. Statistics of labels in the exported dataset.
+ LabelStats label_stats = 4;
+
+ // Output only. output_config in the ExportData request.
+ OutputConfig output_config = 5;
+}
+
+// Metadata of an ImportData operation.
+message ImportDataOperationMetadata {
+  // Output only. The name of the imported dataset.
+  // "projects/*/datasets/*"
+  string dataset = 1;
+
+  // Output only. Partial failures encountered.
+  // E.g. single files that couldn't be read.
+  // The Status details field will contain standard GCP error details.
+ repeated google.rpc.Status partial_failures = 2;
+}
+
+// Metadata of an ExportData operation.
+message ExportDataOperationMetadata {
+  // Output only. The name of the dataset to be exported.
+  // "projects/*/datasets/*/Datasets/*"
+  string dataset = 1;
+
+  // Output only. Partial failures encountered.
+  // E.g. single files that couldn't be read.
+  // The Status details field will contain standard GCP error details.
+ repeated google.rpc.Status partial_failures = 2;
+}
+
+// Metadata of a labeling operation, such as LabelImage or LabelVideo.
+// Next tag: 16
+message LabelOperationMetadata {
+ // Output only. Progress of label operation. Range: [0, 100].
+ // Currently not supported.
+ int32 progress_percent = 1;
+
+ // Output only. Partial failures encountered.
+ // E.g. single files that couldn't be read.
+  // The Status details field will contain standard GCP error details.
+ repeated google.rpc.Status partial_failures = 2;
+
+  // Output only. Details of the specific label operation.
+ oneof details {
+ LabelImageClassificationOperationMetadata image_classification_details = 3;
+ LabelImageBoundingBoxOperationMetadata image_bounding_box_details = 4;
+ LabelImageBoundingPolyOperationMetadata image_bounding_poly_details = 11;
+ LabelImageOrientedBoundingBoxOperationMetadata
+ image_oriented_bounding_box_details = 14;
+ LabelImagePolylineOperationMetadata image_polyline_details = 12;
+ LabelImageSegmentationOperationMetadata image_segmentation_details = 15;
+ LabelVideoClassificationOperationMetadata video_classification_details = 5;
+ LabelVideoObjectDetectionOperationMetadata video_object_detection_details =
+ 6;
+ LabelVideoObjectTrackingOperationMetadata video_object_tracking_details = 7;
+ LabelVideoEventOperationMetadata video_event_details = 8;
+ LabelTextClassificationOperationMetadata text_classification_details = 9;
+ LabelAudioTranscriptionOperationMetadata audio_transcription_details = 10;
+ LabelTextEntityExtractionOperationMetadata text_entity_extraction_details =
+ 13;
+ }
+}
+
+// Metadata of a LabelImageClassification operation.
+message LabelImageClassificationOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelImageBoundingBox operation metadata.
+message LabelImageBoundingBoxOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelImageOrientedBoundingBox operation metadata.
+message LabelImageOrientedBoundingBoxOperationMetadata {
+ // Basic human annotation config.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of LabelImageBoundingPoly operation metadata.
+message LabelImageBoundingPolyOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of LabelImagePolyline operation metadata.
+message LabelImagePolylineOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelImageSegmentation operation metadata.
+message LabelImageSegmentationOperationMetadata {
+ // Basic human annotation config.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelVideoClassification operation metadata.
+message LabelVideoClassificationOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelVideoObjectDetection operation metadata.
+message LabelVideoObjectDetectionOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelVideoObjectTracking operation metadata.
+message LabelVideoObjectTrackingOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelVideoEvent operation metadata.
+message LabelVideoEventOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelTextClassification operation metadata.
+message LabelTextClassificationOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelAudioTranscription operation metadata.
+message LabelAudioTranscriptionOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Details of a LabelTextEntityExtraction operation metadata.
+message LabelTextEntityExtractionOperationMetadata {
+ // Basic human annotation config used in labeling request.
+ HumanAnnotationConfig basic_config = 1;
+}
+
+// Metadata of a CreateInstruction operation.
+message CreateInstructionMetadata {
+ // Output only. The name of the created Instruction.
+ // projects/{project_id}/instructions/{instruction_id}
+ string instruction = 1;
+
+ // Output only. Partial failures encountered.
+ // E.g. single files that couldn't be read.
+  // The Status details field will contain standard GCP error details.
+ repeated google.rpc.Status partial_failures = 2;
+}
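
These response and metadata messages follow the usual long-running operation
pattern: while an operation runs, its metadata carries one of the
*OperationMetadata messages above, and once it completes, its response carries
the matching *OperationResponse. A minimal consumption sketch, assuming a
generated Python client named DataLabelingServiceClient with an import_data
method (the client surface is an assumption, not something defined in this
change; only the message shapes come from the protos above):

    # Hypothetical sketch of consuming an ImportData long-running operation.
    from google.cloud import datalabeling_v1beta1 as dl

    client = dl.DataLabelingServiceClient()
    operation = client.import_data(
        name="projects/my-project/datasets/my-dataset",
        input_config={
            "data_type": "IMAGE",
            "gcs_source": {
                "input_uri": "gs://my-bucket/manifests/images.csv",
                "mime_type": "text/csv",
            },
        },
    )

    # operation.metadata is an ImportDataOperationMetadata (including any
    # partial_failures); result() blocks until done and returns the
    # ImportDataOperationResponse defined above.
    response = operation.result(timeout=3600)
    print(response.total_count, response.import_count)
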