author    Jakob Buchgraber <buchgr@google.com>    2017-05-30 15:49:37 +0200
committer Jakob Buchgraber <buchgr@google.com>    2017-05-30 15:59:48 +0200
commit    84a8e95910f069dd03a19b0fc634f95bb0beac95 (patch)
tree      d3a5cd8e259799e36834b247194c2d1aa3163e16 /third_party/googleapis
parent    c7696b47a4a12b1e56e41246770cbd44ad1c9c3e (diff)
Introduce third_party/googleapis
Add `https://github.com/googleapis/googleapis` as a third_party dependency at commit `001f6702ac4cd72194a5120ff978fcfa740783d6`. These protos are required for the upcoming open sourcing of the BES protocol code. Additionally, add (java_)proto_library() rules for the protobufs required by the BES protocol.

Change-Id: Ie78a9941a62f2085a58ad859c91161885e6f390d
Diffstat (limited to 'third_party/googleapis')
-rw-r--r--  third_party/googleapis/BUILD | 126
-rw-r--r--  third_party/googleapis/LICENSE | 201
-rw-r--r--  third_party/googleapis/README.md | 5
-rw-r--r--  third_party/googleapis/google/api/README.md | 5
-rw-r--r--  third_party/googleapis/google/api/annotations.proto | 31
-rw-r--r--  third_party/googleapis/google/api/auth.proto | 183
-rw-r--r--  third_party/googleapis/google/api/backend.proto | 47
-rw-r--r--  third_party/googleapis/google/api/billing.proto | 98
-rw-r--r--  third_party/googleapis/google/api/config_change.proto | 85
-rw-r--r--  third_party/googleapis/google/api/consumer.proto | 83
-rw-r--r--  third_party/googleapis/google/api/context.proto | 63
-rw-r--r--  third_party/googleapis/google/api/control.proto | 33
-rw-r--r--  third_party/googleapis/google/api/distribution.proto | 185
-rw-r--r--  third_party/googleapis/google/api/documentation.proto | 159
-rw-r--r--  third_party/googleapis/google/api/endpoint.proto | 74
-rw-r--r--  third_party/googleapis/google/api/experimental/authorization_config.proto | 40
-rw-r--r--  third_party/googleapis/google/api/experimental/experimental.proto | 34
-rw-r--r--  third_party/googleapis/google/api/http.proto | 291
-rw-r--r--  third_party/googleapis/google/api/httpbody.proto | 70
-rw-r--r--  third_party/googleapis/google/api/label.proto | 49
-rw-r--r--  third_party/googleapis/google/api/log.proto | 55
-rw-r--r--  third_party/googleapis/google/api/logging.proto | 83
-rw-r--r--  third_party/googleapis/google/api/metric.proto | 196
-rw-r--r--  third_party/googleapis/google/api/monitored_resource.proto | 91
-rw-r--r--  third_party/googleapis/google/api/monitoring.proto | 89
-rw-r--r--  third_party/googleapis/google/api/quota.proto | 259
-rw-r--r--  third_party/googleapis/google/api/service.proto | 176
-rw-r--r--  third_party/googleapis/google/api/servicecontrol/README.md | 126
-rw-r--r--  third_party/googleapis/google/api/servicecontrol/v1/check_error.proto | 95
-rw-r--r--  third_party/googleapis/google/api/servicecontrol/v1/distribution.proto | 159
-rw-r--r--  third_party/googleapis/google/api/servicecontrol/v1/log_entry.proto | 67
-rw-r--r--  third_party/googleapis/google/api/servicecontrol/v1/metric_value.proto | 78
-rw-r--r--  third_party/googleapis/google/api/servicecontrol/v1/operation.proto | 112
-rw-r--r--  third_party/googleapis/google/api/servicecontrol/v1/service_controller.proto | 161
-rw-r--r--  third_party/googleapis/google/api/servicemanagement/README.md | 102
-rw-r--r--  third_party/googleapis/google/api/servicemanagement/v1/resources.proto | 286
-rw-r--r--  third_party/googleapis/google/api/servicemanagement/v1/servicemanager.proto | 392
-rw-r--r--  third_party/googleapis/google/api/source_info.proto | 32
-rw-r--r--  third_party/googleapis/google/api/system_parameter.proto | 96
-rw-r--r--  third_party/googleapis/google/api/usage.proto | 85
-rw-r--r--  third_party/googleapis/google/appengine/README.md | 12
-rw-r--r--  third_party/googleapis/google/appengine/legacy/audit_data.proto | 34
-rw-r--r--  third_party/googleapis/google/appengine/logging/v1/request_log.proto | 190
-rw-r--r--  third_party/googleapis/google/appengine/v1/app_yaml.proto | 285
-rw-r--r--  third_party/googleapis/google/appengine/v1/appengine.proto | 341
-rw-r--r--  third_party/googleapis/google/appengine/v1/application.proto | 112
-rw-r--r--  third_party/googleapis/google/appengine/v1/deploy.proto | 78
-rw-r--r--  third_party/googleapis/google/appengine/v1/instance.proto | 121
-rw-r--r--  third_party/googleapis/google/appengine/v1/location.proto | 39
-rw-r--r--  third_party/googleapis/google/appengine/v1/operation.proto | 56
-rw-r--r--  third_party/googleapis/google/appengine/v1/service.proto | 83
-rw-r--r--  third_party/googleapis/google/appengine/v1/version.proto | 378
-rw-r--r--  third_party/googleapis/google/assistant/embedded/README.md | 3
-rw-r--r--  third_party/googleapis/google/assistant/embedded/v1alpha1/embedded_assistant.proto | 281
-rw-r--r--  third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml | 76
-rw-r--r--  third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto | 126
-rw-r--r--  third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto | 80
-rw-r--r--  third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto | 116
-rw-r--r--  third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto | 233
-rw-r--r--  third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto | 214
-rw-r--r--  third_party/googleapis/google/bigtable/admin/v2/common.proto | 38
-rw-r--r--  third_party/googleapis/google/bigtable/admin/v2/instance.proto | 130
-rw-r--r--  third_party/googleapis/google/bigtable/admin/v2/table.proto | 118
-rw-r--r--  third_party/googleapis/google/bigtable/bigtable.yaml | 33
-rw-r--r--  third_party/googleapis/google/bigtable/v1/bigtable_data.proto | 516
-rw-r--r--  third_party/googleapis/google/bigtable/v1/bigtable_service.proto | 74
-rw-r--r--  third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto | 218
-rw-r--r--  third_party/googleapis/google/bigtable/v2/bigtable.proto | 322
-rw-r--r--  third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml | 137
-rw-r--r--  third_party/googleapis/google/bigtable/v2/data.proto | 533
-rw-r--r--  third_party/googleapis/google/bytestream/bytestream.proto | 181
-rw-r--r--  third_party/googleapis/google/cloud/audit/audit_log.proto | 128
-rw-r--r--  third_party/googleapis/google/cloud/billing/v1/cloud_billing.proto | 214
-rw-r--r--  third_party/googleapis/google/cloud/dataproc/v1/clusters.proto | 444
-rw-r--r--  third_party/googleapis/google/cloud/dataproc/v1/jobs.proto | 573
-rw-r--r--  third_party/googleapis/google/cloud/dataproc/v1/operations.proto | 79
-rw-r--r--  third_party/googleapis/google/cloud/functions/README.md | 2
-rw-r--r--  third_party/googleapis/google/cloud/functions/functions.yaml | 20
-rw-r--r--  third_party/googleapis/google/cloud/functions/v1beta2/functions.proto | 295
-rw-r--r--  third_party/googleapis/google/cloud/functions/v1beta2/functions_gapic.yaml | 145
-rw-r--r--  third_party/googleapis/google/cloud/functions/v1beta2/operations.proto | 54
-rw-r--r--  third_party/googleapis/google/cloud/language/README.md | 0
-rw-r--r--  third_party/googleapis/google/cloud/language/language_v1.yaml | 19
-rw-r--r--  third_party/googleapis/google/cloud/language/language_v1beta2.yaml | 19
-rw-r--r--  third_party/googleapis/google/cloud/language/v1/language_gapic.yaml | 93
-rw-r--r--  third_party/googleapis/google/cloud/language/v1/language_service.proto | 948
-rw-r--r--  third_party/googleapis/google/cloud/language/v1beta1/language_gapic.yaml | 84
-rw-r--r--  third_party/googleapis/google/cloud/language/v1beta1/language_service.proto | 950
-rw-r--r--  third_party/googleapis/google/cloud/language/v1beta2/language_gapic.yaml | 106
-rw-r--r--  third_party/googleapis/google/cloud/language/v1beta2/language_service.proto | 989
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1/job_service.proto | 605
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1/model_service.proto | 371
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1/operation_metadata.proto | 72
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1/prediction_service.proto | 240
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1/project_service.proto | 59
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1beta1/job_service.proto | 605
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1beta1/model_service.proto | 371
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1beta1/operation_metadata.proto | 72
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1beta1/prediction_service.proto | 240
-rw-r--r--  third_party/googleapis/google/cloud/ml/v1beta1/project_service.proto | 59
-rw-r--r--  third_party/googleapis/google/cloud/runtimeconfig/README.md | 39
-rw-r--r--  third_party/googleapis/google/cloud/runtimeconfig/v1beta1/resources.proto | 208
-rw-r--r--  third_party/googleapis/google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto | 410
-rw-r--r--  third_party/googleapis/google/cloud/speech/README.md | 3
-rw-r--r--  third_party/googleapis/google/cloud/speech/cloud_speech_v1.yaml | 20
-rw-r--r--  third_party/googleapis/google/cloud/speech/cloud_speech_v1beta1.yaml | 20
-rw-r--r--  third_party/googleapis/google/cloud/speech/v1/cloud_speech.proto | 407
-rw-r--r--  third_party/googleapis/google/cloud/speech/v1/cloud_speech_gapic.yaml | 86
-rw-r--r--  third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech.proto | 419
-rw-r--r--  third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech_gapic.yaml | 89
-rw-r--r--  third_party/googleapis/google/cloud/support/common.proto | 334
-rw-r--r--  third_party/googleapis/google/cloud/support/v1alpha1/cloud_support.proto | 199
-rw-r--r--  third_party/googleapis/google/cloud/vision/v1/geometry.proto | 54
-rw-r--r--  third_party/googleapis/google/cloud/vision/v1/image_annotator.proto | 569
-rw-r--r--  third_party/googleapis/google/cloud/vision/v1/text_annotation.proto | 237
-rw-r--r--  third_party/googleapis/google/cloud/vision/v1/vision_gapic.yaml | 58
-rw-r--r--  third_party/googleapis/google/cloud/vision/v1/web_detection.proto | 78
-rw-r--r--  third_party/googleapis/google/cloud/vision/vision.yaml | 19
-rw-r--r--  third_party/googleapis/google/container/v1/cluster_service.proto | 986
-rw-r--r--  third_party/googleapis/google/datastore/datastore.yaml | 22
-rw-r--r--  third_party/googleapis/google/datastore/v1/datastore.proto | 318
-rw-r--r--  third_party/googleapis/google/datastore/v1/datastore_gapic.yaml | 121
-rw-r--r--  third_party/googleapis/google/datastore/v1/entity.proto | 203
-rw-r--r--  third_party/googleapis/google/datastore/v1/query.proto | 309
-rw-r--r--  third_party/googleapis/google/datastore/v1beta3/datastore.proto | 318
-rw-r--r--  third_party/googleapis/google/datastore/v1beta3/entity.proto | 203
-rw-r--r--  third_party/googleapis/google/datastore/v1beta3/query.proto | 309
-rw-r--r--  third_party/googleapis/google/devtools/build/v1/build_events.proto | 202
-rw-r--r--  third_party/googleapis/google/devtools/build/v1/build_status.proto | 63
-rw-r--r--  third_party/googleapis/google/devtools/build/v1/publish_build_event.proto | 136
-rw-r--r--  third_party/googleapis/google/devtools/cloudbuild/README.md | 1
-rw-r--r--  third_party/googleapis/google/devtools/cloudbuild/v1/cloudbuild.proto | 579
-rw-r--r--  third_party/googleapis/google/devtools/clouddebugger/clouddebugger.yaml | 36
-rw-r--r--  third_party/googleapis/google/devtools/clouddebugger/v2/clouddebugger_gapic.yaml | 168
-rw-r--r--  third_party/googleapis/google/devtools/clouddebugger/v2/controller.proto | 158
-rw-r--r--  third_party/googleapis/google/devtools/clouddebugger/v2/data.proto | 448
-rw-r--r--  third_party/googleapis/google/devtools/clouddebugger/v2/debugger.proto | 196
-rw-r--r--  third_party/googleapis/google/devtools/clouderrorreporting/README.md | 1
-rw-r--r--  third_party/googleapis/google/devtools/clouderrorreporting/errorreporting.yaml | 24
-rw-r--r--  third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/common.proto | 164
-rw-r--r--  third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_group_service.proto | 60
-rw-r--r--  third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto | 341
-rw-r--r--  third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/errorreporting_gapic.yaml | 224
-rw-r--r--  third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto | 81
-rw-r--r--  third_party/googleapis/google/devtools/cloudtrace/trace.yaml | 32
-rw-r--r--  third_party/googleapis/google/devtools/cloudtrace/v1/trace.proto | 220
-rw-r--r--  third_party/googleapis/google/devtools/cloudtrace/v1/trace_gapic.yaml | 88
-rw-r--r--  third_party/googleapis/google/devtools/source/v1/source_context.proto | 181
-rw-r--r--  third_party/googleapis/google/devtools/sourcerepo/v1/sourcerepo.proto | 147
-rw-r--r--  third_party/googleapis/google/example/library/README.md | 4
-rw-r--r--  third_party/googleapis/google/example/library/v1/library.proto | 254
-rw-r--r--  third_party/googleapis/google/genomics/README.md | 14
-rw-r--r--  third_party/googleapis/google/genomics/v1/annotations.proto | 672
-rw-r--r--  third_party/googleapis/google/genomics/v1/cigar.proto | 99
-rw-r--r--  third_party/googleapis/google/genomics/v1/datasets.proto | 212
-rw-r--r--  third_party/googleapis/google/genomics/v1/operations.proto | 77
-rw-r--r--  third_party/googleapis/google/genomics/v1/position.proto | 42
-rw-r--r--  third_party/googleapis/google/genomics/v1/range.proto | 39
-rw-r--r--  third_party/googleapis/google/genomics/v1/readalignment.proto | 221
-rw-r--r--  third_party/googleapis/google/genomics/v1/readgroup.proto | 106
-rw-r--r--  third_party/googleapis/google/genomics/v1/readgroupset.proto | 64
-rw-r--r--  third_party/googleapis/google/genomics/v1/reads.proto | 468
-rw-r--r--  third_party/googleapis/google/genomics/v1/references.proto | 282
-rw-r--r--  third_party/googleapis/google/genomics/v1/variants.proto | 903
-rw-r--r--  third_party/googleapis/google/genomics/v1alpha2/pipelines.proto | 614
-rw-r--r--  third_party/googleapis/google/iam/README.md | 35
-rw-r--r--  third_party/googleapis/google/iam/admin/v1/iam.proto | 469
-rw-r--r--  third_party/googleapis/google/iam/admin/v1/iam_gapic.yaml | 251
-rw-r--r--  third_party/googleapis/google/iam/iam.yaml | 21
-rw-r--r--  third_party/googleapis/google/iam/v1/iam_policy.proto | 118
-rw-r--r--  third_party/googleapis/google/iam/v1/policy.proto | 149
-rw-r--r--  third_party/googleapis/google/logging/README.md | 3
-rw-r--r--  third_party/googleapis/google/logging/logging.yaml | 80
-rw-r--r--  third_party/googleapis/google/logging/type/http_request.proto | 88
-rw-r--r--  third_party/googleapis/google/logging/type/log_severity.proto | 71
-rw-r--r--  third_party/googleapis/google/logging/v2/log_entry.proto | 157
-rw-r--r--  third_party/googleapis/google/logging/v2/logging.proto | 276
-rw-r--r--  third_party/googleapis/google/logging/v2/logging_config.proto | 276
-rw-r--r--  third_party/googleapis/google/logging/v2/logging_gapic.yaml | 472
-rw-r--r--  third_party/googleapis/google/logging/v2/logging_metrics.proto | 179
-rw-r--r--  third_party/googleapis/google/longrunning/README.md | 5
-rw-r--r--  third_party/googleapis/google/longrunning/longrunning.yaml | 12
-rw-r--r--  third_party/googleapis/google/longrunning/longrunning_gapic.yaml | 98
-rw-r--r--  third_party/googleapis/google/longrunning/operations.proto | 159
-rw-r--r--  third_party/googleapis/google/monitoring/monitoring.yaml | 83
-rw-r--r--  third_party/googleapis/google/monitoring/v3/common.proto | 323
-rw-r--r--  third_party/googleapis/google/monitoring/v3/group.proto | 74
-rw-r--r--  third_party/googleapis/google/monitoring/v3/group_service.proto | 206
-rw-r--r--  third_party/googleapis/google/monitoring/v3/metric.proto | 87
-rw-r--r--  third_party/googleapis/google/monitoring/v3/metric_service.proto | 286
-rw-r--r--  third_party/googleapis/google/monitoring/v3/monitoring_gapic.yaml | 371
-rw-r--r--  third_party/googleapis/google/privacy/dlp/README.md | 7
-rw-r--r--  third_party/googleapis/google/privacy/dlp/dlp.yaml | 59
-rw-r--r--  third_party/googleapis/google/privacy/dlp/v2beta1/dlp.proto | 404
-rw-r--r--  third_party/googleapis/google/privacy/dlp/v2beta1/dlp_gapic.yaml | 122
-rw-r--r--  third_party/googleapis/google/privacy/dlp/v2beta1/storage.proto | 186
-rw-r--r--  third_party/googleapis/google/pubsub/pubsub.yaml | 42
-rw-r--r--  third_party/googleapis/google/pubsub/v1/pubsub.proto | 687
-rw-r--r--  third_party/googleapis/google/pubsub/v1/pubsub_gapic.yaml | 680
-rw-r--r--  third_party/googleapis/google/pubsub/v1beta2/README.md | 156
-rw-r--r--  third_party/googleapis/google/pubsub/v1beta2/pubsub.proto | 384
-rw-r--r--  third_party/googleapis/google/rpc/README.md | 5
-rw-r--r--  third_party/googleapis/google/rpc/code.proto | 180
-rw-r--r--  third_party/googleapis/google/rpc/error_details.proto | 171
-rw-r--r--  third_party/googleapis/google/rpc/status.proto | 92
-rw-r--r--  third_party/googleapis/google/spanner/admin/database/spanner_admin_database.yaml | 41
-rw-r--r--  third_party/googleapis/google/spanner/admin/database/v1/spanner_admin_database_gapic.yaml | 214
-rw-r--r--  third_party/googleapis/google/spanner/admin/database/v1/spanner_database_admin.proto | 277
-rw-r--r--  third_party/googleapis/google/spanner/admin/instance/spanner_admin_instance.yaml | 41
-rw-r--r--  third_party/googleapis/google/spanner/admin/instance/v1/spanner_admin_instance_gapic.yaml | 241
-rw-r--r--  third_party/googleapis/google/spanner/admin/instance/v1/spanner_instance_admin.proto | 446
-rw-r--r--  third_party/googleapis/google/spanner/spanner.yaml | 56
-rw-r--r--  third_party/googleapis/google/spanner/v1/keys.proto | 162
-rw-r--r--  third_party/googleapis/google/spanner/v1/mutation.proto | 92
-rw-r--r--  third_party/googleapis/google/spanner/v1/query_plan.proto | 128
-rw-r--r--  third_party/googleapis/google/spanner/v1/result_set.proto | 186
-rw-r--r--  third_party/googleapis/google/spanner/v1/spanner.proto | 348
-rw-r--r--  third_party/googleapis/google/spanner/v1/spanner_gapic.yaml | 216
-rw-r--r--  third_party/googleapis/google/spanner/v1/transaction.proto | 373
-rw-r--r--  third_party/googleapis/google/spanner/v1/type.proto | 111
-rw-r--r--  third_party/googleapis/google/storagetransfer/v1/transfer.proto | 168
-rw-r--r--  third_party/googleapis/google/storagetransfer/v1/transfer_types.proto | 443
-rw-r--r--  third_party/googleapis/google/tracing/trace.proto | 247
-rw-r--r--  third_party/googleapis/google/type/README.md | 16
-rw-r--r--  third_party/googleapis/google/type/color.proto | 164
-rw-r--r--  third_party/googleapis/google/type/date.proto | 45
-rw-r--r--  third_party/googleapis/google/type/dayofweek.proto | 51
-rw-r--r--  third_party/googleapis/google/type/latlng.proto | 71
-rw-r--r--  third_party/googleapis/google/type/money.proto | 42
-rw-r--r--  third_party/googleapis/google/type/postal_address.proto | 132
-rw-r--r--  third_party/googleapis/google/type/timeofday.proto | 43
-rw-r--r--  third_party/googleapis/google/watcher/v1/watch.proto | 283
232 files changed, 43436 insertions, 0 deletions
diff --git a/third_party/googleapis/BUILD b/third_party/googleapis/BUILD
new file mode 100644
index 0000000000..a501894db6
--- /dev/null
+++ b/third_party/googleapis/BUILD
@@ -0,0 +1,126 @@
+licenses(["notice"])
+
+exports_files(["LICENSE"])
+
+filegroup(
+ name = "srcs",
+ srcs = glob(["**"]),
+ visibility = ["//third_party:__pkg__"],
+)
+
+java_proto_library(
+ name = "google_devtools_build_v1_build_status_java_proto",
+ visibility = ["//visibility:public"],
+ deps = [":google_devtools_build_v1_build_status_proto"],
+)
+
+java_proto_library(
+ name = "google_devtools_build_v1_build_events_java_proto",
+ visibility = ["//visibility:public"],
+ deps = [":google_devtools_build_v1_build_events_proto"],
+)
+
+java_proto_library(
+ name = "google_devtools_build_v1_publish_build_event_java_proto",
+ visibility = ["//visibility:public"],
+ deps = [":google_devtools_build_v1_publish_build_event_proto"],
+)
+
+java_proto_library(
+ name = "google_bytestream_bytestream_java_proto",
+ visibility = ["//visibility:public"],
+ deps = [":google_bytestream_bytestream_proto"],
+)
+
+java_proto_library(
+ name = "google_longrunning_operations_java_proto",
+ visibility = ["//visibility:public"],
+ deps = [":google_longrunning_operations_proto"],
+)
+
+proto_library(
+ name = "google_bytestream_bytestream_proto",
+ srcs = ["google/bytestream/bytestream.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":google_api_annotations_proto",
+ "@com_google_protobuf//:well_known_types_wrappers_proto",
+ ],
+)
+
+proto_library(
+ name = "google_longrunning_operations_proto",
+ srcs = ["google/longrunning/operations.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":google_api_annotations_proto",
+ ":google_api_http_proto",
+ ":google_rpc_status_proto",
+ "@com_google_protobuf//:well_known_types_any_proto",
+ "@com_google_protobuf//:well_known_types_empty_proto",
+ ],
+)
+
+proto_library(
+ name = "google_devtools_build_v1_build_status_proto",
+ srcs = ["google/devtools/build/v1/build_status.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":google_api_annotations_proto",
+ "@com_google_protobuf//:well_known_types_any_proto",
+ ],
+)
+
+proto_library(
+ name = "google_devtools_build_v1_build_events_proto",
+ srcs = ["google/devtools/build/v1/build_events.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":google_api_annotations_proto",
+ ":google_devtools_build_v1_build_status_proto",
+ ":google_rpc_status_proto",
+ "@com_google_protobuf//:well_known_types_any_proto",
+ "@com_google_protobuf//:well_known_types_timestamp_proto",
+ "@com_google_protobuf//:well_known_types_wrappers_proto",
+ ],
+)
+
+proto_library(
+ name = "google_devtools_build_v1_publish_build_event_proto",
+ srcs = ["google/devtools/build/v1/publish_build_event.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":google_api_annotations_proto",
+ ":google_api_auth_proto",
+ ":google_devtools_build_v1_build_events_proto",
+ "@com_google_protobuf//:well_known_types_any_proto",
+ "@com_google_protobuf//:well_known_types_duration_proto",
+ "@com_google_protobuf//:well_known_types_empty_proto",
+ ],
+)
+
+proto_library(
+ name = "google_api_annotations_proto",
+ srcs = ["google/api/annotations.proto"],
+ deps = [
+ ":google_api_http_proto",
+ "@com_google_protobuf//:well_known_types_descriptor_proto",
+ ],
+)
+
+proto_library(
+ name = "google_api_http_proto",
+ srcs = ["google/api/http.proto"],
+)
+
+proto_library(
+ name = "google_rpc_status_proto",
+ srcs = ["google/rpc/status.proto"],
+ deps = ["@com_google_protobuf//:well_known_types_any_proto"],
+)
+
+proto_library(
+ name = "google_api_auth_proto",
+ srcs = ["google/api/auth.proto"],
+ deps = [":google_api_annotations_proto"],
+)
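
A downstream Java target would consume these bindings by depending on the java_proto_library targets defined above. A minimal sketch, assuming a hypothetical consumer target and source file; only the //third_party/googleapis labels come from the BUILD file in this change:

    java_library(
        name = "bes_uploader",            # hypothetical consumer target
        srcs = ["BesUploader.java"],      # hypothetical source file
        deps = [
            # Generated Java classes for the BES protos introduced above.
            "//third_party/googleapis:google_devtools_build_v1_publish_build_event_java_proto",
            "//third_party/googleapis:google_devtools_build_v1_build_events_java_proto",
        ],
    )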
diff --git a/third_party/googleapis/LICENSE b/third_party/googleapis/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/third_party/googleapis/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/googleapis/README.md b/third_party/googleapis/README.md
new file mode 100644
index 0000000000..34cd321d6c
--- /dev/null
+++ b/third_party/googleapis/README.md
@@ -0,0 +1,5 @@
+# Updating googleapis
+
+1) `git clone https://github.com/googleapis/googleapis.git`
+2) Delete every file/folder except for `google/` and `LICENSE`.
+3) Copy the BUILD file over from the old `googleapis` folder.
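
If a later update pulls in protos that the build newly depends on, the copied BUILD file would also gain matching proto_library (and, if needed, java_proto_library) entries. A hedged sketch of what such an addition could look like; the target below is illustrative only and is not defined in this change, and its deps must mirror the proto's actual imports:

    proto_library(
        name = "google_rpc_error_details_proto",      # illustrative target, not part of this change
        srcs = ["google/rpc/error_details.proto"],
        deps = [
            # One entry per import in the .proto file, e.g. a well-known type:
            "@com_google_protobuf//:well_known_types_duration_proto",
        ],
    )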
diff --git a/third_party/googleapis/google/api/README.md b/third_party/googleapis/google/api/README.md
new file mode 100644
index 0000000000..eafe588022
--- /dev/null
+++ b/third_party/googleapis/google/api/README.md
@@ -0,0 +1,5 @@
+This folder contains the schema of the configuration model for the API services
+platform.
+
+**Note**: Protos under this directory are in Alpha status, and therefore are
+subject to breaking changes.
diff --git a/third_party/googleapis/google/api/annotations.proto b/third_party/googleapis/google/api/annotations.proto
new file mode 100644
index 0000000000..85c361b47f
--- /dev/null
+++ b/third_party/googleapis/google/api/annotations.proto
@@ -0,0 +1,31 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.MethodOptions {
+ // See `HttpRule`.
+ HttpRule http = 72295728;
+}
diff --git a/third_party/googleapis/google/api/auth.proto b/third_party/googleapis/google/api/auth.proto
new file mode 100644
index 0000000000..0ef6386319
--- /dev/null
+++ b/third_party/googleapis/google/api/auth.proto
@@ -0,0 +1,183 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "AuthProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Authentication` defines the authentication configuration for an API.
+//
+// Example for an API targeted for external use:
+//
+// name: calendar.googleapis.com
+// authentication:
+// providers:
+// - id: google_calendar_auth
+// jwks_uri: https://www.googleapis.com/oauth2/v1/certs
+// issuer: https://securetoken.google.com
+// rules:
+// - selector: "*"
+// requirements:
+// provider_id: google_calendar_auth
+message Authentication {
+ // A list of authentication rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated AuthenticationRule rules = 3;
+
+ // Defines a set of authentication providers that a service supports.
+ repeated AuthProvider providers = 4;
+}
+
+// Authentication rules for the service.
+//
+// By default, if a method has any authentication requirements, every request
+// must include a valid credential matching one of the requirements.
+// It's an error to include more than one kind of credential in a single
+// request.
+//
+// If a method doesn't have any auth requirements, request credentials will be
+// ignored.
+message AuthenticationRule {
+ // Selects the methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // The requirements for OAuth credentials.
+ OAuthRequirements oauth = 2;
+
+ // Whether to allow requests without a credential. The credential can be
+ // an OAuth token, Google cookies (first-party auth) or EndUserCreds.
+ //
+ // For requests without credentials, if the service control environment is
+ // specified, each incoming request **must** be associated with a service
+ // consumer. This can be done by passing an API key that belongs to a consumer
+ // project.
+ bool allow_without_credential = 5;
+
+ // Requirements for additional authentication providers.
+ repeated AuthRequirement requirements = 7;
+}
+
+// Configuration for an authentication provider, including support for
+// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
+message AuthProvider {
+ // The unique identifier of the auth provider. It will be referred to by
+ // `AuthRequirement.provider_id`.
+ //
+ // Example: "bookstore_auth".
+ string id = 1;
+
+ // Identifies the principal that issued the JWT. See
+ // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1
+ // Usually a URL or an email address.
+ //
+ // Example: https://securetoken.google.com
+ // Example: 1234567-compute@developer.gserviceaccount.com
+ string issuer = 2;
+
+ // URL of the provider's public key set to validate signature of the JWT. See
+ // [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
+ // Optional if the key set document:
+ // - can be retrieved from
+ //    [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html)
+ // of the issuer.
+ // - can be inferred from the email domain of the issuer (e.g. a Google service account).
+ //
+ // Example: https://www.googleapis.com/oauth2/v1/certs
+ string jwks_uri = 3;
+
+ // The list of JWT
+ // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
+ // that are allowed to access. A JWT containing any of these audiences will
+ // be accepted. When this setting is absent, only JWTs with audience
+ // "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
+ // will be accepted. For example, if no audiences are in the setting,
+ // LibraryService API will only accept JWTs with the following audience
+ // "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
+ //
+ // Example:
+ //
+ // audiences: bookstore_android.apps.googleusercontent.com,
+ // bookstore_web.apps.googleusercontent.com
+ string audiences = 4;
+}
+
+// OAuth scopes are a way to define data and permissions on data. For example,
+// there are scopes defined for "Read-only access to Google Calendar" and
+// "Access to Cloud Platform". Users can consent to a scope for an application,
+// giving it permission to access that data on their behalf.
+//
+// OAuth scope specifications should be fairly coarse grained; a user will need
+// to see and understand the text description of what your scope means.
+//
+// In most cases: use one or at most two OAuth scopes for an entire family of
+// products. If your product has multiple APIs, you should probably be sharing
+// the OAuth scope across all of those APIs.
+//
+// When you need finer grained OAuth consent screens: talk with your product
+// management about how developers will use them in practice.
+//
+// Please note that even though each of the canonical scopes is enough for a
+// request to be accepted and passed to the backend, a request can still fail
+// due to the backend requiring additional scopes or permissions.
+message OAuthRequirements {
+ // The list of publicly documented OAuth scopes that are allowed access. An
+ // OAuth token containing any of these scopes will be accepted.
+ //
+ // Example:
+ //
+ // canonical_scopes: https://www.googleapis.com/auth/calendar,
+ // https://www.googleapis.com/auth/calendar.read
+ string canonical_scopes = 1;
+}
+
+// User-defined authentication requirements, including support for
+// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
+message AuthRequirement {
+ // [id][google.api.AuthProvider.id] from authentication provider.
+ //
+ // Example:
+ //
+ // provider_id: bookstore_auth
+ string provider_id = 1;
+
+ // NOTE: This will be deprecated soon, once AuthProvider.audiences is
+ // implemented and accepted in all the runtime components.
+ //
+ // The list of JWT
+ // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
+ // that are allowed to access. A JWT containing any of these audiences will
+ // be accepted. When this setting is absent, only JWTs with audience
+ // "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
+ // will be accepted. For example, if no audiences are in the setting,
+ // LibraryService API will only accept JWTs with the following audience
+ // "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
+ //
+ // Example:
+ //
+ // audiences: bookstore_android.apps.googleusercontent.com,
+ // bookstore_web.apps.googleusercontent.com
+ string audiences = 2;
+}
diff --git a/third_party/googleapis/google/api/backend.proto b/third_party/googleapis/google/api/backend.proto
new file mode 100644
index 0000000000..61a72e14d1
--- /dev/null
+++ b/third_party/googleapis/google/api/backend.proto
@@ -0,0 +1,47 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "BackendProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Backend` defines the backend configuration for a service.
+message Backend {
+ // A list of API backend rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated BackendRule rules = 1;
+}
+
+// A backend rule provides configuration for an individual API element.
+message BackendRule {
+ // Selects the methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // The address of the API backend.
+ string address = 2;
+
+ // The number of seconds to wait for a response from a request. The
+ // default depends on the deployment context.
+ double deadline = 3;
+}
diff --git a/third_party/googleapis/google/api/billing.proto b/third_party/googleapis/google/api/billing.proto
new file mode 100644
index 0000000000..6ecffd5451
--- /dev/null
+++ b/third_party/googleapis/google/api/billing.proto
@@ -0,0 +1,98 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+import "google/api/metric.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "BillingProto";
+option java_package = "com.google.api";
+
+
+// Billing related configuration of the service.
+//
+// The following example shows how to configure metrics for billing:
+//
+// metrics:
+// - name: library.googleapis.com/read_calls
+// metric_kind: DELTA
+// value_type: INT64
+// - name: library.googleapis.com/write_calls
+// metric_kind: DELTA
+// value_type: INT64
+// billing:
+// metrics:
+// - library.googleapis.com/read_calls
+// - library.googleapis.com/write_calls
+//
+// The next example shows how to enable billing status check and customize the
+// check behavior. It makes sure billing status check is included in the `Check`
+// method of [Service Control API](https://cloud.google.com/service-control/).
+// In the example, "google.storage.Get" method can be served when the billing
+// status is either `current` or `delinquent`, while "google.storage.Write"
+// method can only be served when the billing status is `current`:
+//
+// billing:
+// rules:
+// - selector: google.storage.Get
+// allowed_statuses:
+// - current
+// - delinquent
+// - selector: google.storage.Write
+// allowed_statuses: current
+//
+// Most services should only allow `current` status when serving requests.
+// In addition, services can choose to allow both `current` and `delinquent`
+// statuses when serving read-only requests to resources. If there is no
+// matching selector for an operation, no billing status check will be performed.
+//
+message Billing {
+ // Names of the metrics to report to billing. Each name must
+ // be defined in [Service.metrics][google.api.Service.metrics] section.
+ repeated string metrics = 1;
+
+ // A list of billing status rules for configuring billing status check.
+ repeated BillingStatusRule rules = 5;
+}
+
+// Defines the billing status requirements for operations.
+//
+// When used with
+// [Service Control API](https://cloud.google.com/service-control/), the
+// following statuses are supported:
+//
+// - **current**: the associated billing account is up to date and capable of
+// paying for resource usages.
+// - **delinquent**: the associated billing account has a correctable problem,
+// such as late payment.
+//
+// Most services should only allow `current` status when serving requests.
+// In addition, services can choose to allow both `current` and `delinquent`
+// statuses when serving read-only requests to resources. If the list of
+// allowed_statuses is empty, there is no billing requirement.
+//
+message BillingStatusRule {
+ // Selects the operation names to which this rule applies.
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // Allowed billing statuses. The billing status check passes if the actual
+ // billing status matches any of the provided values here.
+ repeated string allowed_statuses = 2;
+}
diff --git a/third_party/googleapis/google/api/config_change.proto b/third_party/googleapis/google/api/config_change.proto
new file mode 100644
index 0000000000..3c416164ec
--- /dev/null
+++ b/third_party/googleapis/google/api/config_change.proto
@@ -0,0 +1,85 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/configchange;configchange";
+option java_multiple_files = true;
+option java_outer_classname = "ConfigChangeProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Output generated from semantically comparing two versions of a service
+// configuration.
+//
+// Includes detailed information about a field that has changed, with
+// applicable advice about potential consequences for the change, such as
+// backwards-incompatibility.
+message ConfigChange {
+ // Object hierarchy path to the change, with levels separated by a '.'
+ // character. For repeated fields, an applicable unique identifier field is
+ // used for the index (usually selector, name, or id). For maps, the term
+ // 'key' is used. If the field has no unique identifier, the numeric index
+ // is used.
+ // Examples:
+ // - visibility.rules[selector=="google.LibraryService.CreateBook"].restriction
+ // - quota.metric_rules[selector=="google"].metric_costs[key=="reads"].value
+ // - logging.producer_destinations[0]
+ string element = 1;
+
+ // Value of the changed object in the old Service configuration,
+ // in JSON format. This field will not be populated if ChangeType == ADDED.
+ string old_value = 2;
+
+ // Value of the changed object in the new Service configuration,
+ // in JSON format. This field will not be populated if ChangeType == REMOVED.
+ string new_value = 3;
+
+ // The type for this change, either ADDED, REMOVED, or MODIFIED.
+ ChangeType change_type = 4;
+
+ // Collection of advice provided for this change, useful for determining the
+ // possible impact of this change.
+ repeated Advice advices = 5;
+}
+
+// Generated advice about this change, used for providing more
+// information about how a change will affect the existing service.
+message Advice {
+ // Useful description for why this advice was applied and what actions should
+ // be taken to mitigate any implied risks.
+ string description = 2;
+}
+
+// Classifies set of possible modifications to an object in the service
+// configuration.
+enum ChangeType {
+ // No value was provided.
+ CHANGE_TYPE_UNSPECIFIED = 0;
+
+ // The changed object exists in the 'new' service configuration, but not
+ // in the 'old' service configuration.
+ ADDED = 1;
+
+ // The changed object exists in the 'old' service configuration, but not
+ // in the 'new' service configuration.
+ REMOVED = 2;
+
+ // The changed object exists in both service configurations, but its value
+ // is different.
+ MODIFIED = 3;
+}
diff --git a/third_party/googleapis/google/api/consumer.proto b/third_party/googleapis/google/api/consumer.proto
new file mode 100644
index 0000000000..4e963168ef
--- /dev/null
+++ b/third_party/googleapis/google/api/consumer.proto
@@ -0,0 +1,83 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "ConsumerProto";
+option java_package = "com.google.api";
+
+
+// A descriptor for defining project properties for a service. One service may
+// have many consumer projects, and the service may want to behave differently
+// depending on some properties on the project. For example, a project may be
+// associated with a school, a business, or a government agency; a business
+// type property on the project may affect how the service responds to the client.
+// This descriptor defines which properties are allowed to be set on a project.
+//
+// Example:
+//
+// project_properties:
+// properties:
+// - name: NO_WATERMARK
+// type: BOOL
+// description: Allows usage of the API without watermarks.
+// - name: EXTENDED_TILE_CACHE_PERIOD
+// type: INT64
+message ProjectProperties {
+ // List of per consumer project-specific properties.
+ repeated Property properties = 1;
+}
+
+// Defines project properties.
+//
+// API services can define properties that can be assigned to consumer projects
+// so that backends can perform response customization without having to make
+// additional calls or maintain additional storage. For example, Maps API
+// defines properties that control the map tile cache period, or whether to embed a
+// watermark in a result.
+//
+// These values can be set via API producer console. Only API providers can
+// define and set these properties.
+message Property {
+ // Supported data type of the property values
+ enum PropertyType {
+ // The type is unspecified, and will result in an error.
+ UNSPECIFIED = 0;
+
+ // The type is `int64`.
+ INT64 = 1;
+
+ // The type is `bool`.
+ BOOL = 2;
+
+ // The type is `string`.
+ STRING = 3;
+
+ // The type is 'double'.
+ DOUBLE = 4;
+ }
+
+ // The name of the property (a.k.a key).
+ string name = 1;
+
+ // The type of this property.
+ PropertyType type = 2;
+
+ // The description of the property
+ string description = 3;
+}
diff --git a/third_party/googleapis/google/api/context.proto b/third_party/googleapis/google/api/context.proto
new file mode 100644
index 0000000000..fd03fcae91
--- /dev/null
+++ b/third_party/googleapis/google/api/context.proto
@@ -0,0 +1,63 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "ContextProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Context` defines which contexts an API requests.
+//
+// Example:
+//
+// context:
+// rules:
+// - selector: "*"
+// requested:
+// - google.rpc.context.ProjectContext
+// - google.rpc.context.OriginContext
+//
+// The above specifies that all methods in the API request
+// `google.rpc.context.ProjectContext` and
+// `google.rpc.context.OriginContext`.
+//
+// Available context types are defined in package
+// `google.rpc.context`.
+message Context {
+ // A list of RPC context rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated ContextRule rules = 1;
+}
+
+// A context rule provides information about the context for an individual API
+// element.
+message ContextRule {
+ // Selects the methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // A list of full type names of requested contexts.
+ repeated string requested = 2;
+
+ // A list of full type names of provided contexts.
+ repeated string provided = 3;
+}
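
For illustration, a `ContextRule` that both requests and provides contexts could be written in the service-config YAML style used throughout these comments; the selector below is a hypothetical method pattern, and the context types are the ones named in package `google.rpc.context`:

    context:
      rules:
      - selector: "google.example.library.v1.LibraryService.*"
        requested:
        - google.rpc.context.ProjectContext
        provided:
        - google.rpc.context.OriginContext
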
diff --git a/third_party/googleapis/google/api/control.proto b/third_party/googleapis/google/api/control.proto
new file mode 100644
index 0000000000..acad56cbdd
--- /dev/null
+++ b/third_party/googleapis/google/api/control.proto
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "ControlProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Selects and configures the service controller used by the service. The
+// service controller handles features like abuse, quota, billing, logging,
+// monitoring, etc.
+message Control {
+ // The service control environment to use. If empty, no control plane
+  // features (like quota and billing) will be enabled.
+ string environment = 1;
+}
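
As a sketch of how `Control` appears in a service configuration: the `environment` value below is the endpoint commonly used for Google's service controller, included here only as an illustrative assumption rather than something mandated by this proto:

    control:
      environment: servicecontrol.googleapis.com
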
diff --git a/third_party/googleapis/google/api/distribution.proto b/third_party/googleapis/google/api/distribution.proto
new file mode 100644
index 0000000000..2861e9b6f3
--- /dev/null
+++ b/third_party/googleapis/google/api/distribution.proto
@@ -0,0 +1,185 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/distribution;distribution";
+option java_multiple_files = true;
+option java_outer_classname = "DistributionProto";
+option java_package = "com.google.api";
+
+
+// Distribution contains summary statistics for a population of values and,
+// optionally, a histogram representing the distribution of those values across
+// a specified set of histogram buckets.
+//
+// The summary statistics are the count, mean, sum of the squared deviation from
+// the mean, the minimum, and the maximum of the set of population of values.
+//
+// The histogram is based on a sequence of buckets and gives a count of values
+// that fall into each bucket. The boundaries of the buckets are given either
+// explicitly or by specifying parameters for a method of computing them
+// (buckets of fixed width or buckets of exponentially increasing width).
+//
+// Although it is not forbidden, it is generally a bad idea to include
+// non-finite values (infinities or NaNs) in the population of values, as this
+// will render the `mean` and `sum_of_squared_deviation` fields meaningless.
+message Distribution {
+ // The range of the population values.
+ message Range {
+ // The minimum of the population values.
+ double min = 1;
+
+ // The maximum of the population values.
+ double max = 2;
+ }
+
+ // A Distribution may optionally contain a histogram of the values in the
+ // population. The histogram is given in `bucket_counts` as counts of values
+ // that fall into one of a sequence of non-overlapping buckets. The sequence
+ // of buckets is described by `bucket_options`.
+ //
+ // A bucket specifies an inclusive lower bound and exclusive upper bound for
+ // the values that are counted for that bucket. The upper bound of a bucket
+ // is strictly greater than the lower bound.
+ //
+ // The sequence of N buckets for a Distribution consists of an underflow
+ // bucket (number 0), zero or more finite buckets (number 1 through N - 2) and
+ // an overflow bucket (number N - 1). The buckets are contiguous: the lower
+ // bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1.
+ // The buckets span the whole range of finite values: lower bound of the
+ // underflow bucket is -infinity and the upper bound of the overflow bucket is
+ // +infinity. The finite buckets are so-called because both bounds are
+ // finite.
+ //
+ // `BucketOptions` describes bucket boundaries in one of three ways. Two
+ // describe the boundaries by giving parameters for a formula to generate
+ // boundaries and one gives the bucket boundaries explicitly.
+ //
+  // If `bucket_options` is not given, then no `bucket_counts` may be given.
+ message BucketOptions {
+ // Specify a sequence of buckets that all have the same width (except
+ // overflow and underflow). Each bucket represents a constant absolute
+ // uncertainty on the specific value in the bucket.
+ //
+ // Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for
+ // bucket `i`:
+ //
+ // Upper bound (0 <= i < N-1): offset + (width * i).
+ // Lower bound (1 <= i < N): offset + (width * (i - 1)).
+ message Linear {
+ // Must be greater than 0.
+ int32 num_finite_buckets = 1;
+
+ // Must be greater than 0.
+ double width = 2;
+
+ // Lower bound of the first bucket.
+ double offset = 3;
+ }
+
+ // Specify a sequence of buckets that have a width that is proportional to
+ // the value of the lower bound. Each bucket represents a constant relative
+ // uncertainty on a specific value in the bucket.
+ //
+ // Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for
+ // bucket i:
+ //
+ // Upper bound (0 <= i < N-1): scale * (growth_factor ^ i).
+ // Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)).
+ message Exponential {
+ // Must be greater than 0.
+ int32 num_finite_buckets = 1;
+
+ // Must be greater than 1.
+ double growth_factor = 2;
+
+ // Must be greater than 0.
+ double scale = 3;
+ }
+
+ // A set of buckets with arbitrary widths.
+ //
+ // Defines `size(bounds) + 1` (= N) buckets with these boundaries for
+ // bucket i:
+ //
+ // Upper bound (0 <= i < N-1): bounds[i]
+    //    Lower bound (1 <= i < N):     bounds[i - 1]
+ //
+ // There must be at least one element in `bounds`. If `bounds` has only one
+ // element, there are no finite buckets, and that single element is the
+ // common boundary of the overflow and underflow buckets.
+ message Explicit {
+ // The values must be monotonically increasing.
+ repeated double bounds = 1;
+ }
+
+ // Exactly one of these three fields must be set.
+ oneof options {
+ // The linear bucket.
+ Linear linear_buckets = 1;
+
+ // The exponential buckets.
+ Exponential exponential_buckets = 2;
+
+ // The explicit buckets.
+ Explicit explicit_buckets = 3;
+ }
+ }
+
+ // The number of values in the population. Must be non-negative.
+ int64 count = 1;
+
+ // The arithmetic mean of the values in the population. If `count` is zero
+ // then this field must be zero.
+ double mean = 2;
+
+ // The sum of squared deviations from the mean of the values in the
+ // population. For values x_i this is:
+ //
+ // Sum[i=1..n]((x_i - mean)^2)
+ //
+ // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+ // describes Welford's method for accumulating this sum in one pass.
+ //
+ // If `count` is zero then this field must be zero.
+ double sum_of_squared_deviation = 3;
+
+ // If specified, contains the range of the population values. The field
+ // must not be present if the `count` is zero.
+ Range range = 4;
+
+ // Defines the histogram bucket boundaries.
+ BucketOptions bucket_options = 6;
+
+ // If `bucket_options` is given, then the sum of the values in `bucket_counts`
+ // must equal the value in `count`. If `bucket_options` is not given, no
+ // `bucket_counts` fields may be given.
+ //
+ // Bucket counts are given in order under the numbering scheme described
+ // above (the underflow bucket has number 0; the finite buckets, if any,
+ // have numbers 1 through N-2; the overflow bucket has number N-1).
+ //
+ // The size of `bucket_counts` must be no greater than N as defined in
+ // `bucket_options`.
+ //
+ // Any suffix of trailing zero bucket_count fields may be omitted.
+ repeated int64 bucket_counts = 7;
+}
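
To make the bucket numbering concrete, the following is an invented `Distribution` value, written in YAML with the field names defined above. With `num_finite_buckets: 3`, `width: 10`, and `offset: 0`, there are N = 5 buckets: the underflow bucket (values < 0), the finite buckets [0, 10), [10, 20), and [20, 30), and the overflow bucket (values >= 30); `bucket_counts` lists the counts in that order and sums to `count`:

    count: 12
    mean: 9.3
    range:
      min: -2.0
      max: 25.0
    bucket_options:
      linear_buckets:
        num_finite_buckets: 3
        width: 10
        offset: 0
    bucket_counts: [2, 5, 3, 2, 0]
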
diff --git a/third_party/googleapis/google/api/documentation.proto b/third_party/googleapis/google/api/documentation.proto
new file mode 100644
index 0000000000..0b85eca51e
--- /dev/null
+++ b/third_party/googleapis/google/api/documentation.proto
@@ -0,0 +1,159 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "DocumentationProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Documentation` provides the information for describing a service.
+//
+// Example:
+// <pre><code>documentation:
+// summary: >
+// The Google Calendar API gives access
+// to most calendar features.
+// pages:
+// - name: Overview
+// content: &#40;== include google/foo/overview.md ==&#41;
+// - name: Tutorial
+// content: &#40;== include google/foo/tutorial.md ==&#41;
+//     subpages:
+// - name: Java
+// content: &#40;== include google/foo/tutorial_java.md ==&#41;
+// rules:
+// - selector: google.calendar.Calendar.Get
+// description: >
+// ...
+// - selector: google.calendar.Calendar.Put
+// description: >
+// ...
+// </code></pre>
+// Documentation is provided in markdown syntax. In addition to
+// standard markdown features, definition lists, tables and fenced
+// code blocks are supported. Section headers can be provided and are
+// interpreted relative to the section nesting of the context where
+// a documentation fragment is embedded.
+//
+// Documentation from the IDL is merged with documentation defined
+// via the config at normalization time, where documentation provided
+// by config rules overrides the IDL-provided documentation.
+//
+// A number of constructs specific to the API platform are supported
+// in documentation text.
+//
+// In order to reference a proto element, the following
+// notation can be used:
+// <pre><code>&#91;fully.qualified.proto.name]&#91;]</code></pre>
+// To override the display text used for the link, this can be used:
+// <pre><code>&#91;display text]&#91;fully.qualified.proto.name]</code></pre>
+// Text can be excluded from doc using the following notation:
+// <pre><code>&#40;-- internal comment --&#41;</code></pre>
+// Comments can be made conditional using a visibility label. The below
+// text will be only rendered if the `BETA` label is available:
+// <pre><code>&#40;--BETA: comment for BETA users --&#41;</code></pre>
+// A few directives are available in documentation. Note that
+// directives must appear on a single line to be properly
+// identified. The `include` directive includes a markdown file from
+// an external source:
+// <pre><code>&#40;== include path/to/file ==&#41;</code></pre>
+// The `resource_for` directive marks a message to be the resource of
+// a collection in REST view. If it is not specified, tools attempt
+// to infer the resource from the operations in a collection:
+// <pre><code>&#40;== resource_for v1.shelves.books ==&#41;</code></pre>
+// The directive `suppress_warning` does not directly affect documentation
+// and is documented together with service config validation.
+message Documentation {
+  // A short summary of what the service does. Can only be provided as
+ // plain text.
+ string summary = 1;
+
+ // The top level pages for the documentation set.
+ repeated Page pages = 5;
+
+ // A list of documentation rules that apply to individual API elements.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated DocumentationRule rules = 3;
+
+ // The URL to the root of documentation.
+ string documentation_root_url = 4;
+
+ // Declares a single overview page. For example:
+ // <pre><code>documentation:
+ // summary: ...
+ // overview: &#40;== include overview.md ==&#41;
+ // </code></pre>
+ // This is a shortcut for the following declaration (using pages style):
+ // <pre><code>documentation:
+ // summary: ...
+ // pages:
+ // - name: Overview
+ // content: &#40;== include overview.md ==&#41;
+ // </code></pre>
+ // Note: you cannot specify both `overview` field and `pages` field.
+ string overview = 2;
+}
+
+// A documentation rule provides information about individual API elements.
+message DocumentationRule {
+ // The selector is a comma-separated list of patterns. Each pattern is a
+ // qualified name of the element which may end in "*", indicating a wildcard.
+ // Wildcards are only allowed at the end and for a whole component of the
+ // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". To
+ // specify a default for all applicable elements, the whole pattern "*"
+ // is used.
+ string selector = 1;
+
+ // Description of the selected API(s).
+ string description = 2;
+
+ // Deprecation description of the selected element(s). It can be provided if an
+ // element is marked as `deprecated`.
+ string deprecation_description = 3;
+}
+
+// Represents a documentation page. A page can contain subpages to represent
+// nested documentation set structure.
+message Page {
+  // The name of the page. It will be used as an identity of the page to
+  // generate the URI of the page, the text of the link to this page in
+  // navigation, etc. The full page name (from the root page name to this
+  // page, concatenated with `.`) can be used as a reference to the page in
+  // your documentation. For example:
+ // <pre><code>pages:
+ // - name: Tutorial
+ // content: &#40;== include tutorial.md ==&#41;
+ // subpages:
+ // - name: Java
+ // content: &#40;== include tutorial_java.md ==&#41;
+ // </code></pre>
+ // You can reference `Java` page using Markdown reference link syntax:
+ // `[Java][Tutorial.Java]`.
+ string name = 1;
+
+ // The Markdown content of the page. You can use <code>&#40;== include {path} ==&#41;</code>
+ // to include content from a Markdown file.
+ string content = 2;
+
+ // Subpages of this page. The order of subpages specified here will be
+ // honored in the generated docset.
+ repeated Page subpages = 3;
+}
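
As a small, hypothetical sketch of a documentation section that uses `deprecation_description` (the selectors and wording are invented for illustration):

    documentation:
      summary: Manages the example library catalog.
      rules:
      - selector: google.example.library.v1.LibraryService.GetBookLegacy
        description: Returns a single book.
        deprecation_description: Deprecated; use GetBook instead.
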
diff --git a/third_party/googleapis/google/api/endpoint.proto b/third_party/googleapis/google/api/endpoint.proto
new file mode 100644
index 0000000000..68655b26a0
--- /dev/null
+++ b/third_party/googleapis/google/api/endpoint.proto
@@ -0,0 +1,74 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "EndpointProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Endpoint` describes a network endpoint that serves a set of APIs.
+// A service may expose any number of endpoints, and all endpoints share the
+// same service configuration, such as quota configuration and monitoring
+// configuration.
+//
+// Example service configuration:
+//
+// name: library-example.googleapis.com
+// endpoints:
+// # Below entry makes 'google.example.library.v1.Library'
+// # API be served from endpoint address library-example.googleapis.com.
+// # It also allows HTTP OPTIONS calls to be passed to the backend, for
+// # it to decide whether the subsequent cross-origin request is
+// # allowed to proceed.
+// - name: library-example.googleapis.com
+// allow_cors: true
+message Endpoint {
+ // The canonical name of this endpoint.
+ string name = 1;
+
+ // DEPRECATED: This field is no longer supported. Instead of using aliases,
+  // please specify multiple [google.api.Endpoint][google.api.Endpoint] entries, one for each
+  // intended alias.
+ //
+ // Additional names that this endpoint will be hosted on.
+ repeated string aliases = 2;
+
+ // The list of APIs served by this endpoint.
+ repeated string apis = 3;
+
+ // The list of features enabled on this endpoint.
+ repeated string features = 4;
+
+  // The specification of an Internet-routable address of the API frontend that
+  // will handle requests to this [API Endpoint](https://cloud.google.com/apis/design/glossary).
+ // It should be either a valid IPv4 address or a fully-qualified domain name.
+ // For example, "8.8.8.8" or "myservice.appspot.com".
+ string target = 101;
+
+ // Allowing
+ // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka
+ // cross-domain traffic, would allow the backends served from this endpoint to
+ // receive and respond to HTTP OPTIONS requests. The response will be used by
+ // the browser to determine whether the subsequent cross-origin request is
+ // allowed to proceed.
+ bool allow_cors = 5;
+}
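
For illustration, an endpoints section that also sets `target` and `allow_cors` might look like the sketch below; the IPv4 address comes from the 203.0.113.0/24 documentation range and is purely illustrative:

    endpoints:
    - name: library-example.googleapis.com
      target: 203.0.113.10
      allow_cors: true
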
diff --git a/third_party/googleapis/google/api/experimental/authorization_config.proto b/third_party/googleapis/google/api/experimental/authorization_config.proto
new file mode 100644
index 0000000000..5c213c6253
--- /dev/null
+++ b/third_party/googleapis/google/api/experimental/authorization_config.proto
@@ -0,0 +1,40 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api;api";
+option java_multiple_files = true;
+option java_outer_classname = "AuthorizationConfigProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Configuration of authorization.
+//
+// This section determines the authorization provider. If unspecified, no
+// authorization check will be done.
+//
+// Example:
+//
+// experimental:
+// authorization:
+// provider: firebaserules.googleapis.com
+message AuthorizationConfig {
+ // The name of the authorization provider, such as
+ // firebaserules.googleapis.com.
+ string provider = 1;
+}
diff --git a/third_party/googleapis/google/api/experimental/experimental.proto b/third_party/googleapis/google/api/experimental/experimental.proto
new file mode 100644
index 0000000000..b98097df54
--- /dev/null
+++ b/third_party/googleapis/google/api/experimental/experimental.proto
@@ -0,0 +1,34 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+import "google/api/experimental/authorization_config.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api;api";
+option java_multiple_files = true;
+option java_outer_classname = "ExperimentalProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Experimental service configuration. These configuration options can
+// only be used by whitelisted users.
+message Experimental {
+ // Authorization configuration.
+ AuthorizationConfig authorization = 8;
+}
diff --git a/third_party/googleapis/google/api/http.proto b/third_party/googleapis/google/api/http.proto
new file mode 100644
index 0000000000..5f8538a016
--- /dev/null
+++ b/third_party/googleapis/google/api/http.proto
@@ -0,0 +1,291 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines the HTTP configuration for a service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+message Http {
+ // A list of HTTP configuration rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated HttpRule rules = 1;
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST APIs. The mapping determines what portions of the request
+// message are populated from the path, query parameters, or body of
+// the HTTP request. The mapping is typically specified as an
+// `google.api.http` annotation, see "google/api/annotations.proto"
+// for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind. The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// SubMessage sub = 2; // `sub.subfield` is url-mapped
+// }
+// message Message {
+// string text = 1; // content of the resource
+// }
+//
+// The same http annotation can alternatively be expressed inside the
+// `GRPC API Configuration` YAML file.
+//
+// http:
+// rules:
+// - selector: <proto_package_name>.Messaging.GetMessage
+// get: /v1/messages/{message_id}/{sub.subfield}
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+//
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// int64 revision = 2; // becomes a parameter
+// SubMessage sub = 3; // `sub.subfield` becomes a parameter
+// }
+//
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//
+// service Messaging {
+// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "message"
+// };
+// }
+// }
+// message UpdateMessageRequest {
+// string message_id = 1; // mapped to the URL
+// Message message = 2; // mapped to the body
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+// service Messaging {
+// rpc UpdateMessage(Message) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "*"
+// };
+// }
+// }
+// message Message {
+// string message_id = 1;
+// string text = 2;
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/messages/{message_id}"
+// additional_bindings {
+// get: "/v1/users/{user_id}/messages/{message_id}"
+// }
+// };
+// }
+// }
+// message GetMessageRequest {
+// string message_id = 1;
+// string user_id = 2;
+// }
+//
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+// omitted. If omitted, it assumes there is no HTTP body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+// request) can be classified into three types:
+// (a) Matched in the URL template.
+// (b) Covered by body (if body is `*`, everything except (a) fields;
+// else everything under the body field)
+// (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+// Template = "/" Segments [ Verb ] ;
+// Segments = Segment { "/" Segment } ;
+// Segment = "*" | "**" | LITERAL | Variable ;
+// Variable = "{" FieldPath [ "=" Segments ] "}" ;
+// FieldPath = IDENT { "." IDENT } ;
+// Verb = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. It follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion.
+//
+// The syntax `**` matches zero or more path segments. It follows the semantics
+// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved
+// Expansion. NOTE: it must be the last segment in the path except the Verb.
+//
+// The syntax `LITERAL` matches literal text in the URL path.
+//
+// The syntax `Variable` matches the entire path as specified by its template;
+// this nested template must not contain further variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+//
+// Use CustomHttpPattern to specify any HTTP method that is not included in the
+// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for
+// a given URL path rule. The wild-card rule is useful for services that provide
+// content to Web (HTML) clients.
+message HttpRule {
+ // Selects methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+  // Determines the URL pattern that is matched by this rule. This pattern can
+  // be used with any of the {get|put|post|delete|patch} methods. A custom
+  // method can be defined using the `custom` field.
+ oneof pattern {
+ // Used for listing and getting information about resources.
+ string get = 2;
+
+ // Used for updating a resource.
+ string put = 3;
+
+ // Used for creating a resource.
+ string post = 4;
+
+ // Used for deleting a resource.
+ string delete = 5;
+
+ // Used for updating a resource.
+ string patch = 6;
+
+ // Custom pattern is used for defining custom verbs.
+ CustomHttpPattern custom = 8;
+ }
+
+ // The name of the request field whose value is mapped to the HTTP body, or
+ // `*` for mapping all fields not captured by the path pattern to the HTTP
+ // body. NOTE: the referred field must not be a repeated field and must be
+  // present at the top level of the request message type.
+ string body = 7;
+
+ // Additional HTTP bindings for the selector. Nested bindings must
+ // not contain an `additional_bindings` field themselves (that is,
+ // the nesting may only be one level deep).
+ repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+ // The name of this custom HTTP verb.
+ string kind = 1;
+
+ // The path matched by this custom verb.
+ string path = 2;
+}
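
A sketch of a `custom` binding in the gRPC API Configuration YAML style shown above; the selector and path are hypothetical, and `kind`/`path` correspond to the `CustomHttpPattern` fields:

    http:
      rules:
      - selector: google.example.library.v1.LibraryService.CheckBook
        custom:
          kind: HEAD
          path: /v1/books/{book_id}
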
diff --git a/third_party/googleapis/google/api/httpbody.proto b/third_party/googleapis/google/api/httpbody.proto
new file mode 100644
index 0000000000..f5176634b0
--- /dev/null
+++ b/third_party/googleapis/google/api/httpbody.proto
@@ -0,0 +1,70 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/httpbody;httpbody";
+option java_multiple_files = true;
+option java_outer_classname = "HttpBodyProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Message that represents an arbitrary HTTP body. It should only be used for
+// payload formats that can't be represented as JSON, such as raw binary or
+// an HTML page.
+//
+//
+// This message can be used both in streaming and non-streaming API methods in
+// the request as well as the response.
+//
+// It can be used as a top-level request field, which is convenient if one
+// wants to extract parameters from either the URL or HTTP template into the
+// request fields and also wants access to the raw HTTP body.
+//
+// Example:
+//
+// message GetResourceRequest {
+// // A unique request id.
+// string request_id = 1;
+//
+// // The raw HTTP body is bound to this field.
+// google.api.HttpBody http_body = 2;
+// }
+//
+// service ResourceService {
+// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);
+// rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty);
+// }
+//
+// Example with streaming methods:
+//
+// service CaldavService {
+// rpc GetCalendar(stream google.api.HttpBody)
+// returns (stream google.api.HttpBody);
+// rpc UpdateCalendar(stream google.api.HttpBody)
+// returns (stream google.api.HttpBody);
+// }
+//
+// Use of this type only changes how the request and response bodies are
+// handled; all other features will continue to work unchanged.
+message HttpBody {
+ // The HTTP Content-Type string representing the content type of the body.
+ string content_type = 1;
+
+ // HTTP body binary data.
+ bytes data = 2;
+}
diff --git a/third_party/googleapis/google/api/label.proto b/third_party/googleapis/google/api/label.proto
new file mode 100644
index 0000000000..ec2c14f0c3
--- /dev/null
+++ b/third_party/googleapis/google/api/label.proto
@@ -0,0 +1,49 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/label;label";
+option java_multiple_files = true;
+option java_outer_classname = "LabelProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// A description of a label.
+message LabelDescriptor {
+ // Value types that can be used as label values.
+ enum ValueType {
+ // A variable-length string. This is the default.
+ STRING = 0;
+
+ // Boolean; true or false.
+ BOOL = 1;
+
+ // A 64-bit signed integer.
+ INT64 = 2;
+ }
+
+ // The label key.
+ string key = 1;
+
+ // The type of data that can be assigned to the label.
+ ValueType value_type = 2;
+
+ // A human-readable description for the label.
+ string description = 3;
+}
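
For illustration, a label described by `LabelDescriptor` shows up in service configuration as in the sketch below (the key and description are hypothetical); since `STRING` is the default, `value_type` only needs to be spelled out for `BOOL` or `INT64` labels:

    labels:
    - key: /branch_id
      value_type: INT64
      description: Numeric identifier of the library branch.
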
diff --git a/third_party/googleapis/google/api/log.proto b/third_party/googleapis/google/api/log.proto
new file mode 100644
index 0000000000..d64f243958
--- /dev/null
+++ b/third_party/googleapis/google/api/log.proto
@@ -0,0 +1,55 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/label.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "LogProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// A description of a log type. Example in YAML format:
+//
+// - name: library.googleapis.com/activity_history
+// description: The history of borrowing and returning library items.
+// display_name: Activity
+// labels:
+// - key: /customer_id
+// description: Identifier of a library customer
+message LogDescriptor {
+ // The name of the log. It must be less than 512 characters long and can
+ // include the following characters: upper- and lower-case alphanumeric
+ // characters [A-Za-z0-9], and punctuation characters including
+ // slash, underscore, hyphen, period [/_-.].
+ string name = 1;
+
+ // The set of labels that are available to describe a specific log entry.
+ // Runtime requests that contain labels not specified here are
+ // considered invalid.
+ repeated LabelDescriptor labels = 2;
+
+ // A human-readable description of this log. This information appears in
+ // the documentation and can contain details.
+ string description = 3;
+
+ // The human-readable name for this log. This information appears on
+ // the user interface and should be concise.
+ string display_name = 4;
+}
diff --git a/third_party/googleapis/google/api/logging.proto b/third_party/googleapis/google/api/logging.proto
new file mode 100644
index 0000000000..15c8a996e9
--- /dev/null
+++ b/third_party/googleapis/google/api/logging.proto
@@ -0,0 +1,83 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "LoggingProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Logging configuration of the service.
+//
+// The following example shows how to configure logs to be sent to the
+// producer and consumer projects. In the example, the `activity_history`
+// log is sent to both the producer and consumer projects, whereas the
+// `purchase_history` log is only sent to the producer project.
+//
+// monitored_resources:
+// - type: library.googleapis.com/branch
+// labels:
+// - key: /city
+//         description: The city where the library branch is located.
+// - key: /name
+// description: The name of the branch.
+// logs:
+// - name: activity_history
+// labels:
+// - key: /customer_id
+// - name: purchase_history
+// logging:
+// producer_destinations:
+// - monitored_resource: library.googleapis.com/branch
+// logs:
+// - activity_history
+// - purchase_history
+// consumer_destinations:
+// - monitored_resource: library.googleapis.com/branch
+// logs:
+// - activity_history
+message Logging {
+ // Configuration of a specific logging destination (the producer project
+ // or the consumer project).
+ message LoggingDestination {
+ // The monitored resource type. The type must be defined in the
+ // [Service.monitored_resources][google.api.Service.monitored_resources] section.
+ string monitored_resource = 3;
+
+ // Names of the logs to be sent to this destination. Each name must
+ // be defined in the [Service.logs][google.api.Service.logs] section. If the log name is
+ // not a domain scoped name, it will be automatically prefixed with
+ // the service name followed by "/".
+ repeated string logs = 1;
+ }
+
+ // Logging configurations for sending logs to the producer project.
+ // There can be multiple producer destinations, each one must have a
+ // different monitored resource type. A log can be used in at most
+ // one producer destination.
+ repeated LoggingDestination producer_destinations = 1;
+
+ // Logging configurations for sending logs to the consumer project.
+ // There can be multiple consumer destinations, each one must have a
+ // different monitored resource type. A log can be used in at most
+ // one consumer destination.
+ repeated LoggingDestination consumer_destinations = 2;
+}
diff --git a/third_party/googleapis/google/api/metric.proto b/third_party/googleapis/google/api/metric.proto
new file mode 100644
index 0000000000..17b7263a33
--- /dev/null
+++ b/third_party/googleapis/google/api/metric.proto
@@ -0,0 +1,196 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/label.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/metric;metric";
+option java_multiple_files = true;
+option java_outer_classname = "MetricProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines a metric type and its schema. Once a metric descriptor is created,
+// deleting or altering it stops data collection and makes the metric type's
+// existing data unusable.
+message MetricDescriptor {
+ // The kind of measurement. It describes how the data is reported.
+ enum MetricKind {
+ // Do not use this default value.
+ METRIC_KIND_UNSPECIFIED = 0;
+
+ // An instantaneous measurement of a value.
+ GAUGE = 1;
+
+ // The change in a value during a time interval.
+ DELTA = 2;
+
+ // A value accumulated over a time interval. Cumulative
+ // measurements in a time series should have the same start time
+ // and increasing end times, until an event resets the cumulative
+ // value to zero and sets a new start time for the following
+ // points.
+ CUMULATIVE = 3;
+ }
+
+ // The value type of a metric.
+ enum ValueType {
+ // Do not use this default value.
+ VALUE_TYPE_UNSPECIFIED = 0;
+
+ // The value is a boolean.
+ // This value type can be used only if the metric kind is `GAUGE`.
+ BOOL = 1;
+
+ // The value is a signed 64-bit integer.
+ INT64 = 2;
+
+ // The value is a double precision floating point number.
+ DOUBLE = 3;
+
+ // The value is a text string.
+ // This value type can be used only if the metric kind is `GAUGE`.
+ STRING = 4;
+
+ // The value is a [`Distribution`][google.api.Distribution].
+ DISTRIBUTION = 5;
+
+ // The value is money.
+ MONEY = 6;
+ }
+
+ // The resource name of the metric descriptor. Depending on the
+ // implementation, the name typically includes: (1) the parent resource name
+ // that defines the scope of the metric type or of its data; and (2) the
+ // metric's URL-encoded type, which also appears in the `type` field of this
+  // descriptor. For example, the following is the resource name of a custom
+ // metric within the GCP project `my-project-id`:
+ //
+ // "projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount"
+ string name = 1;
+
+ // The metric type, including its DNS name prefix. The type is not
+ // URL-encoded. All user-defined custom metric types have the DNS name
+ // `custom.googleapis.com`. Metric types should use a natural hierarchical
+ // grouping. For example:
+ //
+ // "custom.googleapis.com/invoice/paid/amount"
+ // "appengine.googleapis.com/http/server/response_latencies"
+ string type = 8;
+
+ // The set of labels that can be used to describe a specific
+ // instance of this metric type. For example, the
+ // `appengine.googleapis.com/http/server/response_latencies` metric
+ // type has a label for the HTTP response code, `response_code`, so
+ // you can look at latencies for successful responses or just
+ // for responses that failed.
+ repeated LabelDescriptor labels = 2;
+
+ // Whether the metric records instantaneous values, changes to a value, etc.
+ // Some combinations of `metric_kind` and `value_type` might not be supported.
+ MetricKind metric_kind = 3;
+
+ // Whether the measurement is an integer, a floating-point number, etc.
+ // Some combinations of `metric_kind` and `value_type` might not be supported.
+ ValueType value_type = 4;
+
+ // The unit in which the metric value is reported. It is only applicable
+ // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The
+ // supported units are a subset of [The Unified Code for Units of
+ // Measure](http://unitsofmeasure.org/ucum.html) standard:
+ //
+ // **Basic units (UNIT)**
+ //
+ // * `bit` bit
+ // * `By` byte
+ // * `s` second
+ // * `min` minute
+ // * `h` hour
+ // * `d` day
+ //
+ // **Prefixes (PREFIX)**
+ //
+ // * `k` kilo (10**3)
+ // * `M` mega (10**6)
+ // * `G` giga (10**9)
+ // * `T` tera (10**12)
+ // * `P` peta (10**15)
+ // * `E` exa (10**18)
+ // * `Z` zetta (10**21)
+ // * `Y` yotta (10**24)
+ // * `m` milli (10**-3)
+ // * `u` micro (10**-6)
+ // * `n` nano (10**-9)
+ // * `p` pico (10**-12)
+ // * `f` femto (10**-15)
+ // * `a` atto (10**-18)
+ // * `z` zepto (10**-21)
+ // * `y` yocto (10**-24)
+ // * `Ki` kibi (2**10)
+ // * `Mi` mebi (2**20)
+ // * `Gi` gibi (2**30)
+ // * `Ti` tebi (2**40)
+ //
+ // **Grammar**
+ //
+ // The grammar includes the dimensionless unit `1`, such as `1/s`.
+ //
+ // The grammar also includes these connectors:
+ //
+ // * `/` division (as an infix operator, e.g. `1/s`).
+ // * `.` multiplication (as an infix operator, e.g. `GBy.d`)
+ //
+ // The grammar for a unit is as follows:
+ //
+ // Expression = Component { "." Component } { "/" Component } ;
+ //
+ // Component = [ PREFIX ] UNIT [ Annotation ]
+ // | Annotation
+ // | "1"
+ // ;
+ //
+ // Annotation = "{" NAME "}" ;
+ //
+ // Notes:
+ //
+ // * `Annotation` is just a comment if it follows a `UNIT` and is
+  //    equivalent to `1` if it is used alone. For example,
+ // `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.
+ // * `NAME` is a sequence of non-blank printable ASCII characters not
+ // containing '{' or '}'.
+ string unit = 5;
+
+ // A detailed description of the metric, which can be used in documentation.
+ string description = 6;
+
+ // A concise name for the metric, which can be displayed in user interfaces.
+ // Use sentence case without an ending period, for example "Request count".
+ string display_name = 7;
+}
+
+// A specific metric, identified by specifying values for all of the
+// labels of a [`MetricDescriptor`][google.api.MetricDescriptor].
+message Metric {
+ // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor].
+ // For example, `custom.googleapis.com/invoice/paid/amount`.
+ string type = 3;
+
+ // The set of label values that uniquely identify this metric. All
+ // labels listed in the `MetricDescriptor` must be assigned values.
+ map<string, string> labels = 2;
+}
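
To connect the `unit` grammar above to a configuration, here is an illustrative metric definition in the YAML style used elsewhere in these files; the metric name and label are hypothetical, and `By` (bytes) could equally carry a prefix such as `kBy`:

    metrics:
    - name: library.googleapis.com/book/download_size
      display_name: Book download size
      metric_kind: DELTA
      value_type: INT64
      unit: By
      labels:
      - key: /customer_id
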
diff --git a/third_party/googleapis/google/api/monitored_resource.proto b/third_party/googleapis/google/api/monitored_resource.proto
new file mode 100644
index 0000000000..612ffa18da
--- /dev/null
+++ b/third_party/googleapis/google/api/monitored_resource.proto
@@ -0,0 +1,91 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/label.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/monitoredres;monitoredres";
+option java_multiple_files = true;
+option java_outer_classname = "MonitoredResourceProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a
+// type name and a set of labels. For example, the monitored resource
+// descriptor for Google Compute Engine VM instances has a type of
+// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
+// `"zone"` to identify particular VM instances.
+//
+// Different APIs can support different monitored resource types. APIs generally
+// provide a `list` method that returns the monitored resource descriptors used
+// by the API.
+message MonitoredResourceDescriptor {
+ // Optional. The resource name of the monitored resource descriptor:
+ // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where
+ // {type} is the value of the `type` field in this object and
+ // {project_id} is a project ID that provides API-specific context for
+ // accessing the type. APIs that do not use project information can use the
+ // resource name format `"monitoredResourceDescriptors/{type}"`.
+ string name = 5;
+
+ // Required. The monitored resource type. For example, the type
+ // `"cloudsql_database"` represents databases in Google Cloud SQL.
+ // The maximum length of this value is 256 characters.
+ string type = 1;
+
+ // Optional. A concise name for the monitored resource type that might be
+ // displayed in user interfaces. It should be a Title Cased Noun Phrase,
+ // without any article or other determiners. For example,
+ // `"Google Cloud SQL Database"`.
+ string display_name = 2;
+
+ // Optional. A detailed description of the monitored resource type that might
+ // be used in documentation.
+ string description = 3;
+
+ // Required. A set of labels used to describe instances of this monitored
+ // resource type. For example, an individual Google Cloud SQL database is
+ // identified by values for the labels `"database_id"` and `"zone"`.
+ repeated LabelDescriptor labels = 4;
+}
+
+// An object representing a resource that can be used for monitoring, logging,
+// billing, or other purposes. Examples include virtual machine instances,
+// databases, and storage devices such as disks. The `type` field identifies a
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
+// schema. Information in the `labels` field identifies the actual resource and
+// its attributes according to the schema. For example, a particular Compute
+// Engine VM instance could be represented by the following object, because the
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
+// `"instance_id"` and `"zone"`:
+//
+// { "type": "gce_instance",
+// "labels": { "instance_id": "12345678901234",
+// "zone": "us-central1-a" }}
+message MonitoredResource {
+ // Required. The monitored resource type. This field must match
+ // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
+ // example, the type of a Cloud SQL database is `"cloudsql_database"`.
+ string type = 1;
+
+ // Required. Values for all of the labels listed in the associated monitored
+ // resource descriptor. For example, Cloud SQL databases use the labels
+ // `"database_id"` and `"zone"`.
+ map<string, string> labels = 2;
+}
diff --git a/third_party/googleapis/google/api/monitoring.proto b/third_party/googleapis/google/api/monitoring.proto
new file mode 100644
index 0000000000..f49c85f85e
--- /dev/null
+++ b/third_party/googleapis/google/api/monitoring.proto
@@ -0,0 +1,89 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "MonitoringProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Monitoring configuration of the service.
+//
+// The example below shows how to configure monitored resources and metrics
+// for monitoring. In the example, a monitored resource and two metrics are
+// defined. The `library.googleapis.com/book/returned_count` metric is sent
+// to both producer and consumer projects, whereas the
+// `library.googleapis.com/book/overdue_count` metric is only sent to the
+// consumer project.
+//
+// monitored_resources:
+// - type: library.googleapis.com/branch
+// labels:
+// - key: /city
+//         description: The city where the library branch is located.
+// - key: /name
+// description: The name of the branch.
+// metrics:
+// - name: library.googleapis.com/book/returned_count
+// metric_kind: DELTA
+// value_type: INT64
+// labels:
+// - key: /customer_id
+// - name: library.googleapis.com/book/overdue_count
+// metric_kind: GAUGE
+// value_type: INT64
+// labels:
+// - key: /customer_id
+// monitoring:
+// producer_destinations:
+// - monitored_resource: library.googleapis.com/branch
+// metrics:
+// - library.googleapis.com/book/returned_count
+// consumer_destinations:
+// - monitored_resource: library.googleapis.com/branch
+// metrics:
+// - library.googleapis.com/book/returned_count
+// - library.googleapis.com/book/overdue_count
+message Monitoring {
+ // Configuration of a specific monitoring destination (the producer project
+ // or the consumer project).
+ message MonitoringDestination {
+ // The monitored resource type. The type must be defined in
+ // [Service.monitored_resources][google.api.Service.monitored_resources] section.
+ string monitored_resource = 1;
+
+ // Names of the metrics to report to this monitoring destination.
+ // Each name must be defined in [Service.metrics][google.api.Service.metrics] section.
+ repeated string metrics = 2;
+ }
+
+ // Monitoring configurations for sending metrics to the producer project.
+  // There can be multiple producer destinations, each of which must have a
+ // different monitored resource type. A metric can be used in at most
+ // one producer destination.
+ repeated MonitoringDestination producer_destinations = 1;
+
+ // Monitoring configurations for sending metrics to the consumer project.
+  // There can be multiple consumer destinations, each of which must have a
+ // different monitored resource type. A metric can be used in at most
+ // one consumer destination.
+ repeated MonitoringDestination consumer_destinations = 2;
+}
diff --git a/third_party/googleapis/google/api/quota.proto b/third_party/googleapis/google/api/quota.proto
new file mode 100644
index 0000000000..aa327a1969
--- /dev/null
+++ b/third_party/googleapis/google/api/quota.proto
@@ -0,0 +1,259 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "QuotaProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Quota configuration helps to achieve fairness and budgeting in service
+// usage.
+//
+// The quota configuration works this way:
+// - The service configuration defines a set of metrics.
+// - For API calls, the quota.metric_rules maps methods to metrics with
+// corresponding costs.
+// - The quota.limits defines limits on the metrics, which will be used for
+// quota checks at runtime.
+//
+// An example quota configuration in yaml format:
+//
+// quota:
+// limits:
+//
+// - name: apiWriteQpsPerProject
+// metric: library.googleapis.com/write_calls
+// unit: "1/min/{project}" # rate limit for consumer projects
+// values:
+// STANDARD: 10000
+//
+// # The metric rules bind all methods to the read_calls metric,
+// # except for the UpdateBook and DeleteBook methods. These two methods
+// # are mapped to the write_calls metric, with the UpdateBook method
+// # consuming at twice the rate of the DeleteBook method.
+// metric_rules:
+// - selector: "*"
+// metric_costs:
+// library.googleapis.com/read_calls: 1
+// - selector: google.example.library.v1.LibraryService.UpdateBook
+// metric_costs:
+// library.googleapis.com/write_calls: 2
+// - selector: google.example.library.v1.LibraryService.DeleteBook
+// metric_costs:
+// library.googleapis.com/write_calls: 1
+//
+// Corresponding Metric definition:
+//
+// metrics:
+// - name: library.googleapis.com/read_calls
+// display_name: Read requests
+// metric_kind: DELTA
+// value_type: INT64
+//
+// - name: library.googleapis.com/write_calls
+// display_name: Write requests
+// metric_kind: DELTA
+// value_type: INT64
+//
+message Quota {
+ // List of `QuotaLimit` definitions for the service.
+ //
+ // Used by metric-based quotas only.
+ repeated QuotaLimit limits = 3;
+
+ // List of `MetricRule` definitions, each one mapping a selected method to one
+ // or more metrics.
+ //
+ // Used by metric-based quotas only.
+ repeated MetricRule metric_rules = 4;
+}
+
+// Bind API methods to metrics. Binding a method to a metric causes that
+// metric's configured quota, billing, and monitoring behaviors to apply to the
+// method call.
+//
+// Used by metric-based quotas only.
+message MetricRule {
+ // Selects the methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // Metrics to update when the selected methods are called, and the associated
+ // cost applied to each metric.
+ //
+  // The key of the map is the metric name, and the value is the amount by
+  // which the metric is increased when the selected methods are called; the
+  // quota limits are defined against these metrics. The value must not be
+  // negative.
+ map<string, int64> metric_costs = 2;
+}
+
+// `QuotaLimit` defines a specific limit that applies over a specified duration
+// for a limit type. There can be at most one limit for a duration and limit
+// type combination defined within a `QuotaGroup`.
+message QuotaLimit {
+ // Name of the quota limit. The name is used to refer to the limit when
+  // overriding the default limit on a per-consumer basis.
+ //
+ // For group-based quota limits, the name must be unique within the quota
+ // group. If a name is not provided, it will be generated from the limit_by
+ // and duration fields.
+ //
+ // For metric-based quota limits, the name must be provided, and it must be
+ // unique within the service. The name can only include alphanumeric
+ // characters as well as '-'.
+ //
+ // The maximum length of the limit name is 64 characters.
+ //
+ // The name of a limit is used as a unique identifier for this limit.
+ // Therefore, once a limit has been put into use, its name should be
+ // immutable. You can use the display_name field to provide a user-friendly
+ // name for the limit. The display name can be evolved over time without
+ // affecting the identity of the limit.
+ string name = 6;
+
+ // Optional. User-visible, extended description for this quota limit.
+ // Should be used only when more context is needed to understand this limit
+ // than provided by the limit's display name (see: `display_name`).
+ string description = 2;
+
+ // Default number of tokens that can be consumed during the specified
+ // duration. This is the number of tokens assigned when a client
+ // application developer activates the service for his/her project.
+ //
+ // Specifying a value of 0 will block all requests. This can be used if you
+ // are provisioning quota to selected consumers and blocking others.
+ // Similarly, a value of -1 will indicate an unlimited quota. No other
+ // negative values are allowed.
+ //
+ // Used by group-based quotas only.
+ int64 default_limit = 3;
+
+ // Maximum number of tokens that can be consumed during the specified
+ // duration. Client application developers can override the default limit up
+ // to this maximum. If specified, this value cannot be set to a value less
+ // than the default limit. If not specified, it is set to the default limit.
+ //
+ // To allow clients to apply overrides with no upper bound, set this to -1,
+ // indicating unlimited maximum quota.
+ //
+ // Used by group-based quotas only.
+ int64 max_limit = 4;
+
+ // Free tier value displayed in the Developers Console for this limit.
+ // The free tier is the number of tokens that will be subtracted from the
+ // billed amount when billing is enabled.
+ // This field can only be set on a limit with duration "1d", in a billable
+ // group; it is invalid on any other limit. If this field is not set, it
+ // defaults to 0, indicating that there is no free tier for this service.
+ //
+ // Used by group-based quotas only.
+ int64 free_tier = 7;
+
+ // Duration of this limit in textual notation. Example: "100s", "24h", "1d".
+  // For durations longer than a day, only multiples of days are supported. We
+ // support only "100s" and "1d" for now. Additional support will be added in
+ // the future. "0" indicates indefinite duration.
+ //
+ // Used by group-based quotas only.
+ string duration = 5;
+
+ // The name of the metric this quota limit applies to. The quota limits with
+ // the same metric will be checked together during runtime. The metric must be
+ // defined within the service config.
+ //
+ // Used by metric-based quotas only.
+ string metric = 8;
+
+ // Specify the unit of the quota limit. It uses the same syntax as
+ // [Metric.unit][]. The supported unit kinds are determined by the quota
+ // backend system.
+ //
+ // The [Google Service Control](https://cloud.google.com/service-control)
+ // supports the following unit components:
+  // * One of the time intervals:
+ // * "/min" for quota every minute.
+ // * "/d" for quota every 24 hours, starting 00:00 US Pacific Time.
+ // * Otherwise the quota won't be reset by time, such as storage limit.
+ // * One and only one of the granted containers:
+ // * "/{organization}" quota for an organization.
+ // * "/{project}" quota for a project.
+ // * "/{folder}" quota for a folder.
+ // * "/{resource}" quota for a universal resource.
+  // * Zero or more quota segmentation dimensions. Not all combinations are valid.
+  //   * "/{region}" quota for every region. Not to be used with time intervals.
+  //     * Otherwise the resources granted on the target are not segmented.
+  //   * "/{zone}" quota for every zone. Not to be used with time intervals.
+  //     * Otherwise the resources granted on the target are not segmented.
+ // * "/{resource}" quota for a resource associated with a project or org.
+ //
+ // Here are some examples:
+ // * "1/min/{project}" for quota per minute per project.
+ // * "1/min/{user}" for quota per minute per user.
+ // * "1/min/{organization}" for quota per minute per organization.
+ //
+ // Note: the order of unit components is insignificant.
+ // The "1" at the beginning is required to follow the metric unit syntax.
+ //
+ // Used by metric-based quotas only.
+ string unit = 9;
+
+ // Tiered limit values. Also allows for regional or zone overrides for these
+ // values if "/{region}" or "/{zone}" is specified in the unit field.
+ //
+ // Currently supported tiers from low to high:
+ // VERY_LOW, LOW, STANDARD, HIGH, VERY_HIGH
+ //
+ // To apply different limit values for users according to their tiers, specify
+ // the values for the tiers you want to differentiate. For example:
+ // {LOW:100, STANDARD:500, HIGH:1000, VERY_HIGH:5000}
+ //
+ // The limit value for each tier is optional except for the tier STANDARD.
+ // The limit value for an unspecified tier falls to the value of its next
+ // tier towards tier STANDARD. For the above example, the limit value for tier
+ // STANDARD is 500.
+ //
+ // To apply the same limit value for all users, just specify limit value for
+ // tier STANDARD. For example: {STANDARD:500}.
+ //
+  // To apply a regional override for a tier, add a map entry with key
+ // "<TIER>/<region>", where <region> is a region name. Similarly, for a zone
+  // override, add a map entry with key "<TIER>/<zone>", where <zone> is a zone name.
+ // Further, a wildcard can be used at the end of a zone name in order to
+ // specify zone level overrides. For example:
+ // LOW: 10, STANDARD: 50, HIGH: 100,
+ // LOW/us-central1: 20, STANDARD/us-central1: 60, HIGH/us-central1: 200,
+ // LOW/us-central1-*: 10, STANDARD/us-central1-*: 20, HIGH/us-central1-*: 80
+ //
+  // The set of tiers used in the regional overrides for a region must be the
+  // same as the set of tiers used for the default limit values. The same rule
+  // applies to zone overrides.
+ //
+ // Used by metric-based quotas only.
+ map<string, int64> values = 10;
+
+ // User-visible display name for this limit.
+ // Optional. If not set, the UI will provide a default display name based on
+ // the quota configuration. This field can be used to override the default
+ // display name generated from the configuration.
+ string display_name = 12;
+}
diff --git a/third_party/googleapis/google/api/service.proto b/third_party/googleapis/google/api/service.proto
new file mode 100644
index 0000000000..04c7fd700b
--- /dev/null
+++ b/third_party/googleapis/google/api/service.proto
@@ -0,0 +1,176 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/api/backend.proto";
+import "google/api/context.proto";
+import "google/api/control.proto";
+import "google/api/documentation.proto";
+import "google/api/endpoint.proto";
+import "google/api/experimental/experimental.proto";
+import "google/api/http.proto";
+import "google/api/label.proto";
+import "google/api/log.proto";
+import "google/api/logging.proto";
+import "google/api/metric.proto";
+import "google/api/monitored_resource.proto";
+import "google/api/monitoring.proto";
+import "google/api/quota.proto";
+import "google/api/source_info.proto";
+import "google/api/system_parameter.proto";
+import "google/api/usage.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/api.proto";
+import "google/protobuf/type.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "ServiceProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Service` is the root object of Google service configuration schema. It
+// describes basic information about a service, such as the name and the
+// title, and delegates other aspects to sub-sections. Each sub-section is
+// either a proto message or a repeated proto message that configures a
+// specific aspect, such as auth. See each proto message definition for details.
+//
+// Example:
+//
+// type: google.api.Service
+// config_version: 3
+// name: calendar.googleapis.com
+// title: Google Calendar API
+// apis:
+// - name: google.calendar.v3.Calendar
+// authentication:
+// providers:
+// - id: google_calendar_auth
+// jwks_uri: https://www.googleapis.com/oauth2/v1/certs
+// issuer: https://securetoken.google.com
+// rules:
+// - selector: "*"
+// requirements:
+// provider_id: google_calendar_auth
+message Service {
+ // The version of the service configuration. The config version may
+ // influence interpretation of the configuration, for example, to
+ // determine defaults. This is documented together with applicable
+ // options. The current default for the config version itself is `3`.
+ google.protobuf.UInt32Value config_version = 20;
+
+ // The DNS address at which this service is available,
+ // e.g. `calendar.googleapis.com`.
+ string name = 1;
+
+ // A unique ID for a specific instance of this message, typically assigned
+  // by the client for tracking purposes. If empty, the server may choose to
+ // generate one instead.
+ string id = 33;
+
+ // The product title associated with this service.
+ string title = 2;
+
+ // The id of the Google developer project that owns the service.
+ // Members of this project can manage the service configuration,
+ // manage consumption of the service, etc.
+ string producer_project_id = 22;
+
+ // A list of API interfaces exported by this service. Only the `name` field
+ // of the [google.protobuf.Api][google.protobuf.Api] needs to be provided by the configuration
+ // author, as the remaining fields will be derived from the IDL during the
+ // normalization process. It is an error to specify an API interface here
+ // which cannot be resolved against the associated IDL files.
+ repeated google.protobuf.Api apis = 3;
+
+ // A list of all proto message types included in this API service.
+ // Types referenced directly or indirectly by the `apis` are
+ // automatically included. Messages which are not referenced but
+ // shall be included, such as types used by the `google.protobuf.Any` type,
+ // should be listed here by name. Example:
+ //
+ // types:
+ // - name: google.protobuf.Int32
+ repeated google.protobuf.Type types = 4;
+
+ // A list of all enum types included in this API service. Enums
+ // referenced directly or indirectly by the `apis` are automatically
+ // included. Enums which are not referenced but shall be included
+ // should be listed here by name. Example:
+ //
+ // enums:
+ // - name: google.someapi.v1.SomeEnum
+ repeated google.protobuf.Enum enums = 5;
+
+ // Additional API documentation.
+ Documentation documentation = 6;
+
+ // API backend configuration.
+ Backend backend = 8;
+
+ // HTTP configuration.
+ Http http = 9;
+
+ // Quota configuration.
+ Quota quota = 10;
+
+ // Auth configuration.
+ Authentication authentication = 11;
+
+ // Context configuration.
+ Context context = 12;
+
+ // Configuration controlling usage of this service.
+ Usage usage = 15;
+
+ // Configuration for network endpoints. If this is empty, then an endpoint
+ // with the same name as the service is automatically generated to service all
+ // defined APIs.
+ repeated Endpoint endpoints = 18;
+
+ // Configuration for the service control plane.
+ Control control = 21;
+
+ // Defines the logs used by this service.
+ repeated LogDescriptor logs = 23;
+
+ // Defines the metrics used by this service.
+ repeated MetricDescriptor metrics = 24;
+
+ // Defines the monitored resources used by this service. This is required
+ // by the [Service.monitoring][google.api.Service.monitoring] and [Service.logging][google.api.Service.logging] configurations.
+ repeated MonitoredResourceDescriptor monitored_resources = 25;
+
+ // Logging configuration.
+ Logging logging = 27;
+
+ // Monitoring configuration.
+ Monitoring monitoring = 28;
+
+ // System parameter configuration.
+ SystemParameters system_parameters = 29;
+
+ // Output only. The source information for this configuration if available.
+ SourceInfo source_info = 37;
+
+ // Experimental configuration.
+ Experimental experimental = 101;
+}
diff --git a/third_party/googleapis/google/api/servicecontrol/README.md b/third_party/googleapis/google/api/servicecontrol/README.md
new file mode 100644
index 0000000000..860fdd3f78
--- /dev/null
+++ b/third_party/googleapis/google/api/servicecontrol/README.md
@@ -0,0 +1,126 @@
+Google Service Control provides control plane functionality to managed services,
+such as logging, monitoring, and status checks. This page provides an overview
+of what it does and how it works.
+
+## Why use Service Control?
+
+When you develop a cloud service, you typically start with the business
+requirements and the architecture design, then proceed with API definition
+and implementation. Before you put your service into production, you
+need to deal with many control plane issues:
+
+* How to control access to your service.
+* How to send logging and monitoring data to both consumers and producers.
+* How to create and manage dashboards to visualize this data.
+* How to automatically scale the control plane components with your service.
+
+Service Control is a mature and feature-rich control plane provider
+that addresses these needs with high efficiency, high scalability,
+and high availability. It provides a simple public API that can be accessed
+from anywhere using JSON REST and gRPC clients, so when you move your service
+from on-premise to a cloud provider, or from one cloud provider to another,
+you don't need to change the control plane provider.
+
+Services built using Google Cloud Endpoints already take advantage of
+Service Control. Cloud Endpoints sends logging and monitoring data
+through Google Service Control for every request arriving at its
+proxy. If you need to report any additional logging and monitoring data for
+your Cloud Endpoints service, you can call the Service Control API directly
+from your service.
+
+The Service Control API definition is open sourced and available on
+[GitHub](https://github.com/googleapis/googleapis/tree/master/google/api/servicecontrol).
+By changing the DNS name, you can easily use alternative implementations of
+the Service Control API.
+
+## Architecture
+
+Google Service Control works with a set of *managed services* and their
+*operations* (activities), *checks* whether an operation is allowed to proceed,
+and *reports* completed operations. Behind the scenes, it leverages other
+Google Cloud services, such as
+[Google Service Management](/service-management),
+[Stackdriver Logging](/logging), and [Stackdriver Monitoring](/monitoring),
+while hiding their complexity from service producers. It enables service
+producers to send telemetry data to their consumers. It uses caching,
+batching, aggregation, and retries to deliver higher performance and
+availability than the individual backend systems it encapsulates.
+
+<figure id="fig-arch" class="center">
+<div style="width: 70%;margin: auto">
+ <img src="/service-control/images/arch.svg"
+ alt="The overall architecture of a service that uses Google Service Control.">
+</div>
+<figcaption><b>Figure 1</b>: Using Google Service Control.</figcaption>
+</figure>
+
+The Service Control API provides two methods:
+
+* [`services.check`](/service-control/reference/rest/v1/services/check), used for:
+ * Ensuring valid consumer status
+ * Validating API keys
+* [`services.report`](/service-control/reference/rest/v1/services/report), used for:
+ * Sending logs to Stackdriver Logging
+ * Sending metrics to Stackdriver Monitoring
+
+We’ll look at these in more detail in the rest of this overview.
+
+## Managed services
+
+A [managed service](/service-management/reference/rest/v1/services) is
+a network service managed by
+[Google Service Management](/service-management). Each managed service has a
+unique name, such as `example.googleapis.com`, which must be a valid
+fully-qualified DNS name, as per RFC 1035.
+
+For example:
+
+* Google Cloud Pub/Sub (`pubsub.googleapis.com`)
+* Google Cloud Vision (`vision.googleapis.com`)
+* Google Cloud Bigtable (`bigtable.googleapis.com`)
+* Google Cloud Datastore (`datastore.googleapis.com`)
+
+Google Service Management manages the lifecycle of each service’s
+configuration, which is used to customize Google Service Control's behavior.
+Service configurations are also used by Google Cloud Console
+for displaying APIs and their settings, enabling/disabling APIs, and more.
+
+## Operations
+
+Google Service Control uses the generic concept of an *operation*
+to represent the
+activities of a managed service, such as API calls and resource usage. Each
+operation is associated with a managed service and a specific service
+consumer, and has a set of properties that describe the operation, such as
+the API method name and resource usage amount. For more information, see the
+[Operation definition](/service-control/rest/v1/Operation).
+
+## Check
+
+The [`services.check`](/service-control/reference/rest/v1/services/check)
+method determines whether an operation should be allowed to proceed
+for a managed service.
+
+For example:
+
+* Check if the consumer is still active.
+* Check if the consumer has enabled the service.
+* Check if the API key is still valid.
+
+By performing multiple checks within a single method call, `services.check` provides
+better performance, higher reliability, and reduced development cost to
+service producers compared to checking with multiple backend systems.
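+
+For illustration, here is a minimal sketch of building a check request with the
+Java classes generated from the `servicecontrol/v1` protos (it assumes the
+standard protobuf-java builders; the service name, consumer id, and timestamp
+below are placeholders):
+
+    import com.google.api.servicecontrol.v1.CheckRequest;
+    import com.google.api.servicecontrol.v1.Operation;
+    import com.google.protobuf.Timestamp;
+
+    public final class CheckRequestSketch {
+      public static CheckRequest build() {
+        // Placeholder operation; a real caller fills in its own ids and times.
+        Operation operation =
+            Operation.newBuilder()
+                .setOperationId("123e4567-e89b-42d3-a456-426655440000") // UUID v4
+                .setConsumerId("project:example-project")
+                .setStartTime(Timestamp.newBuilder().setSeconds(1496150400))
+                .build();
+        return CheckRequest.newBuilder()
+            .setServiceName("library.googleapis.com")
+            .setOperation(operation)
+            .build();
+      }
+    }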
+
+## Report
+
+The [`services.report`](/service-control/reference/rest/v1/services/report)
+method reports completed operations for
+a managed service to backend systems, such as logging and monitoring. The
+reported data can be seen in Google API Console and Google Cloud Console,
+and retrieved with appropriate APIs, such as the Stackdriver Logging and
+Stackdriver Monitoring APIs.
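+
+A report request carries one or more completed operations, each with the metric
+values to record. Here is a similar minimal sketch, again with placeholder
+values and assuming the generated Java classes:
+
+    import com.google.api.servicecontrol.v1.MetricValue;
+    import com.google.api.servicecontrol.v1.MetricValueSet;
+    import com.google.api.servicecontrol.v1.Operation;
+    import com.google.api.servicecontrol.v1.ReportRequest;
+    import com.google.protobuf.Timestamp;
+
+    public final class ReportRequestSketch {
+      public static ReportRequest build() {
+        // One completed operation with a single metric value (placeholders).
+        Operation operation =
+            Operation.newBuilder()
+                .setOperationId("123e4567-e89b-42d3-a456-426655440000")
+                .setConsumerId("project:example-project")
+                .setStartTime(Timestamp.newBuilder().setSeconds(1496150400))
+                .setEndTime(Timestamp.newBuilder().setSeconds(1496150401))
+                .addMetricValueSets(
+                    MetricValueSet.newBuilder()
+                        .setMetricName("library.googleapis.com/read_calls")
+                        .addMetricValues(MetricValue.newBuilder().setInt64Value(1)))
+                .build();
+        return ReportRequest.newBuilder()
+            .setServiceName("library.googleapis.com")
+            .addOperations(operation)
+            .build();
+      }
+    }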
+
+## Next steps
+
+* Read our [Getting Started guide](/service-control/getting-started) to find out
+ how to set up and use the Google Service Control API. \ No newline at end of file
diff --git a/third_party/googleapis/google/api/servicecontrol/v1/check_error.proto b/third_party/googleapis/google/api/servicecontrol/v1/check_error.proto
new file mode 100644
index 0000000000..4fa31cf4c2
--- /dev/null
+++ b/third_party/googleapis/google/api/servicecontrol/v1/check_error.proto
@@ -0,0 +1,95 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicecontrol.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol";
+option java_multiple_files = true;
+option java_outer_classname = "CheckErrorProto";
+option java_package = "com.google.api.servicecontrol.v1";
+
+
+// Defines the errors to be returned in
+// [google.api.servicecontrol.v1.CheckResponse.check_errors][google.api.servicecontrol.v1.CheckResponse.check_errors].
+message CheckError {
+ // Error codes for Check responses.
+ enum Code {
+ // This is never used in `CheckResponse`.
+ ERROR_CODE_UNSPECIFIED = 0;
+
+ // The consumer's project id was not found.
+ // Same as [google.rpc.Code.NOT_FOUND][].
+ NOT_FOUND = 5;
+
+ // The consumer doesn't have access to the specified resource.
+ // Same as [google.rpc.Code.PERMISSION_DENIED][].
+ PERMISSION_DENIED = 7;
+
+ // Quota check failed. Same as [google.rpc.Code.RESOURCE_EXHAUSTED][].
+ RESOURCE_EXHAUSTED = 8;
+
+ // The consumer hasn't activated the service.
+ SERVICE_NOT_ACTIVATED = 104;
+
+ // The consumer cannot access the service because billing is disabled.
+ BILLING_DISABLED = 107;
+
+ // The consumer's project has been marked as deleted (soft deletion).
+ PROJECT_DELETED = 108;
+
+ // The consumer's project number or id does not represent a valid project.
+ PROJECT_INVALID = 114;
+
+ // The IP address of the consumer is invalid for the specific consumer
+ // project.
+ IP_ADDRESS_BLOCKED = 109;
+
+ // The referer address of the consumer request is invalid for the specific
+ // consumer project.
+ REFERER_BLOCKED = 110;
+
+ // The client application of the consumer request is invalid for the
+ // specific consumer project.
+ CLIENT_APP_BLOCKED = 111;
+
+ // The consumer's API key is invalid.
+ API_KEY_INVALID = 105;
+
+    // The consumer's API key has expired.
+    API_KEY_EXPIRED = 112;
+
+    // The consumer's API key was not found in the config record.
+ API_KEY_NOT_FOUND = 113;
+
+ // The backend server for looking up project id/number is unavailable.
+ NAMESPACE_LOOKUP_UNAVAILABLE = 300;
+
+ // The backend server for checking service status is unavailable.
+ SERVICE_STATUS_UNAVAILABLE = 301;
+
+ // The backend server for checking billing status is unavailable.
+ BILLING_STATUS_UNAVAILABLE = 302;
+ }
+
+ // The error code.
+ Code code = 1;
+
+  // Free-form text providing details on the cause of the error.
+ string detail = 2;
+}
diff --git a/third_party/googleapis/google/api/servicecontrol/v1/distribution.proto b/third_party/googleapis/google/api/servicecontrol/v1/distribution.proto
new file mode 100644
index 0000000000..93ec2e4d67
--- /dev/null
+++ b/third_party/googleapis/google/api/servicecontrol/v1/distribution.proto
@@ -0,0 +1,159 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicecontrol.v1;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol";
+option java_multiple_files = true;
+option java_outer_classname = "DistributionProto";
+option java_package = "com.google.api.servicecontrol.v1";
+
+
+// Distribution represents a frequency distribution of double-valued sample
+// points. It contains the size of the population of sample points plus
+// additional optional information:
+//
+// - the arithmetic mean of the samples
+// - the minimum and maximum of the samples
+// - the sum-squared-deviation of the samples, used to compute variance
+// - a histogram of the values of the sample points
+message Distribution {
+ // Describing buckets with constant width.
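+  //
+  // For example, a hypothetical configuration with `num_finite_buckets = 2`,
+  // `width = 10`, and `offset = 0` yields four buckets:
+  // (-inf, 0), [0, 10), [10, 20), and [20, +inf).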
+ message LinearBuckets {
+ // The number of finite buckets. With the underflow and overflow buckets,
+ // the total number of buckets is `num_finite_buckets` + 2.
+ // See comments on `bucket_options` for details.
+ int32 num_finite_buckets = 1;
+
+ // The i'th linear bucket covers the interval
+ // [offset + (i-1) * width, offset + i * width)
+ // where i ranges from 1 to num_finite_buckets, inclusive.
+ // Must be strictly positive.
+ double width = 2;
+
+ // The i'th linear bucket covers the interval
+ // [offset + (i-1) * width, offset + i * width)
+ // where i ranges from 1 to num_finite_buckets, inclusive.
+ double offset = 3;
+ }
+
+ // Describing buckets with exponentially growing width.
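+  //
+  // For example, a hypothetical configuration with `num_finite_buckets = 2`,
+  // `growth_factor = 2`, and `scale = 1` yields four buckets:
+  // (-inf, 1), [1, 2), [2, 4), and [4, +inf).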
+ message ExponentialBuckets {
+ // The number of finite buckets. With the underflow and overflow buckets,
+ // the total number of buckets is `num_finite_buckets` + 2.
+ // See comments on `bucket_options` for details.
+ int32 num_finite_buckets = 1;
+
+ // The i'th exponential bucket covers the interval
+ // [scale * growth_factor^(i-1), scale * growth_factor^i)
+ // where i ranges from 1 to num_finite_buckets inclusive.
+ // Must be larger than 1.0.
+ double growth_factor = 2;
+
+ // The i'th exponential bucket covers the interval
+ // [scale * growth_factor^(i-1), scale * growth_factor^i)
+ // where i ranges from 1 to num_finite_buckets inclusive.
+ // Must be > 0.
+ double scale = 3;
+ }
+
+ // Describing buckets with arbitrary user-provided width.
+ message ExplicitBuckets {
+ // 'bound' is a list of strictly increasing boundaries between
+ // buckets. Note that a list of length N-1 defines N buckets because
+ // of fenceposting. See comments on `bucket_options` for details.
+ //
+ // The i'th finite bucket covers the interval
+ // [bound[i-1], bound[i])
+ // where i ranges from 1 to bound_size() - 1. Note that there are no
+ // finite buckets at all if 'bound' only contains a single element; in
+ // that special case the single bound defines the boundary between the
+ // underflow and overflow buckets.
+ //
+ // bucket number lower bound upper bound
+ // i == 0 (underflow) -inf bound[i]
+ // 0 < i < bound_size() bound[i-1] bound[i]
+ // i == bound_size() (overflow) bound[i-1] +inf
+ repeated double bounds = 1;
+ }
+
+ // The total number of samples in the distribution. Must be >= 0.
+ int64 count = 1;
+
+ // The arithmetic mean of the samples in the distribution. If `count` is
+ // zero then this field must be zero.
+ double mean = 2;
+
+ // The minimum of the population of values. Ignored if `count` is zero.
+ double minimum = 3;
+
+ // The maximum of the population of values. Ignored if `count` is zero.
+ double maximum = 4;
+
+ // The sum of squared deviations from the mean:
+ // Sum[i=1..count]((x_i - mean)^2)
+  // where each x_i is a sample value. If `count` is zero then this field
+ // must be zero, otherwise validation of the request fails.
+ double sum_of_squared_deviation = 5;
+
+ // The number of samples in each histogram bucket. `bucket_counts` are
+ // optional. If present, they must sum to the `count` value.
+ //
+ // The buckets are defined below in `bucket_option`. There are N buckets.
+ // `bucket_counts[0]` is the number of samples in the underflow bucket.
+ // `bucket_counts[1]` to `bucket_counts[N-1]` are the numbers of samples
+  // in each of the finite buckets, and `bucket_counts[N]` is the number
+ // of samples in the overflow bucket. See the comments of `bucket_option`
+ // below for more details.
+ //
+ // Any suffix of trailing zeros may be omitted.
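+  //
+  // For example, with two finite buckets the histogram has four buckets in
+  // total, so `bucket_counts` could be `[1, 3, 5, 0]` (hypothetical counts):
+  // 1 sample in the underflow bucket, 3 and 5 in the two finite buckets, and
+  // none in the overflow bucket (the trailing zero could also be omitted).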
+ repeated int64 bucket_counts = 6;
+
+ // Defines the buckets in the histogram. `bucket_option` and `bucket_counts`
+ // must be both set, or both unset.
+ //
+  // Buckets are numbered in the range [0, N], with a total of N+1 buckets.
+ // There must be at least two buckets (a single-bucket histogram gives
+ // no information that isn't already provided by `count`).
+ //
+ // The first bucket is the underflow bucket which has a lower bound
+ // of -inf. The last bucket is the overflow bucket which has an
+ // upper bound of +inf. All other buckets (if any) are called "finite"
+ // buckets because they have finite lower and upper bounds. As described
+ // below, there are three ways to define the finite buckets.
+ //
+ // (1) Buckets with constant width.
+ // (2) Buckets with exponentially growing widths.
+ // (3) Buckets with arbitrary user-provided widths.
+ //
+ // In all cases, the buckets cover the entire real number line (-inf,
+ // +inf). Bucket upper bounds are exclusive and lower bounds are
+ // inclusive. The upper bound of the underflow bucket is equal to the
+ // lower bound of the smallest finite bucket; the lower bound of the
+ // overflow bucket is equal to the upper bound of the largest finite
+ // bucket.
+ oneof bucket_option {
+ // Buckets with constant width.
+ LinearBuckets linear_buckets = 7;
+
+ // Buckets with exponentially growing width.
+ ExponentialBuckets exponential_buckets = 8;
+
+ // Buckets with arbitrary user-provided width.
+ ExplicitBuckets explicit_buckets = 9;
+ }
+}
diff --git a/third_party/googleapis/google/api/servicecontrol/v1/log_entry.proto b/third_party/googleapis/google/api/servicecontrol/v1/log_entry.proto
new file mode 100644
index 0000000000..fddb9851ec
--- /dev/null
+++ b/third_party/googleapis/google/api/servicecontrol/v1/log_entry.proto
@@ -0,0 +1,67 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicecontrol.v1;
+
+import "google/api/annotations.proto";
+import "google/logging/type/log_severity.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol";
+option java_multiple_files = true;
+option java_outer_classname = "LogEntryProto";
+option java_package = "com.google.api.servicecontrol.v1";
+
+
+// An individual log entry.
+message LogEntry {
+ // Required. The log to which this log entry belongs. Examples: `"syslog"`,
+ // `"book_log"`.
+ string name = 10;
+
+ // The time the event described by the log entry occurred. If
+ // omitted, defaults to operation start time.
+ google.protobuf.Timestamp timestamp = 11;
+
+ // The severity of the log entry. The default value is
+ // `LogSeverity.DEFAULT`.
+ google.logging.type.LogSeverity severity = 12;
+
+ // A unique ID for the log entry used for deduplication. If omitted,
+ // the implementation will generate one based on operation_id.
+ string insert_id = 4;
+
+ // A set of user-defined (key, value) data that provides additional
+ // information about the log entry.
+ map<string, string> labels = 13;
+
+ // The log entry payload, which can be one of multiple types.
+ oneof payload {
+ // The log entry payload, represented as a protocol buffer that is
+ // expressed as a JSON object. You can only pass `protoPayload`
+ // values that belong to a set of approved types.
+ google.protobuf.Any proto_payload = 2;
+
+ // The log entry payload, represented as a Unicode string (UTF-8).
+ string text_payload = 3;
+
+ // The log entry payload, represented as a structure that
+ // is expressed as a JSON object.
+ google.protobuf.Struct struct_payload = 6;
+ }
+}
diff --git a/third_party/googleapis/google/api/servicecontrol/v1/metric_value.proto b/third_party/googleapis/google/api/servicecontrol/v1/metric_value.proto
new file mode 100644
index 0000000000..d5e5803b7a
--- /dev/null
+++ b/third_party/googleapis/google/api/servicecontrol/v1/metric_value.proto
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicecontrol.v1;
+
+import "google/api/annotations.proto";
+import "google/api/servicecontrol/v1/distribution.proto";
+import "google/protobuf/timestamp.proto";
+import "google/type/money.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol";
+option java_multiple_files = true;
+option java_outer_classname = "MetricValueSetProto";
+option java_package = "com.google.api.servicecontrol.v1";
+
+
+// Represents a single metric value.
+message MetricValue {
+ // The labels describing the metric value.
+ // See comments on [google.api.servicecontrol.v1.Operation.labels][google.api.servicecontrol.v1.Operation.labels] for
+ // the overriding relationship.
+ map<string, string> labels = 1;
+
+ // The start of the time period over which this metric value's measurement
+ // applies. The time period has different semantics for different metric
+ // types (cumulative, delta, and gauge). See the metric definition
+ // documentation in the service configuration for details.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The end of the time period over which this metric value's measurement
+ // applies.
+ google.protobuf.Timestamp end_time = 3;
+
+ // The value. The type of value used in the request must
+ // agree with the metric definition in the service configuration, otherwise
+ // the MetricValue is rejected.
+ oneof value {
+ // A boolean value.
+ bool bool_value = 4;
+
+ // A signed 64-bit integer value.
+ int64 int64_value = 5;
+
+ // A double precision floating point value.
+ double double_value = 6;
+
+ // A text string value.
+ string string_value = 7;
+
+ // A distribution value.
+ Distribution distribution_value = 8;
+ }
+}
+
+// Represents a set of metric values in the same metric.
+// Each metric value in the set should have a unique combination of start time,
+// end time, and label values.
+message MetricValueSet {
+ // The metric name defined in the service configuration.
+ string metric_name = 1;
+
+ // The values in this metric.
+ repeated MetricValue metric_values = 2;
+}
diff --git a/third_party/googleapis/google/api/servicecontrol/v1/operation.proto b/third_party/googleapis/google/api/servicecontrol/v1/operation.proto
new file mode 100644
index 0000000000..568108af53
--- /dev/null
+++ b/third_party/googleapis/google/api/servicecontrol/v1/operation.proto
@@ -0,0 +1,112 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicecontrol.v1;
+
+import "google/api/annotations.proto";
+import "google/api/servicecontrol/v1/log_entry.proto";
+import "google/api/servicecontrol/v1/metric_value.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol";
+option java_multiple_files = true;
+option java_outer_classname = "OperationProto";
+option java_package = "com.google.api.servicecontrol.v1";
+
+
+// Represents information regarding an operation.
+message Operation {
+ // Defines the importance of the data contained in the operation.
+ enum Importance {
+ // The API implementation may cache and aggregate the data.
+ // The data may be lost when rare and unexpected system failures occur.
+ LOW = 0;
+
+ // The API implementation doesn't cache and aggregate the data.
+ // If the method returns successfully, it's guaranteed that the data has
+ // been persisted in durable storage.
+ HIGH = 1;
+ }
+
+ // Identity of the operation. This must be unique within the scope of the
+ // service that generated the operation. If the service calls
+ // Check() and Report() on the same operation, the two calls should carry
+ // the same id.
+ //
+ // UUID version 4 is recommended, though not required.
+ // In scenarios where an operation is computed from existing information
+  // and an idempotent id is desirable for deduplication purposes, UUID version 5
+ // is recommended. See RFC 4122 for details.
+ string operation_id = 1;
+
+ // Fully qualified name of the operation. Reserved for future use.
+ string operation_name = 2;
+
+ // Identity of the consumer who is using the service.
+ // This field should be filled in for the operations initiated by a
+ // consumer, but not for service-initiated operations that are
+ // not related to a specific consumer.
+ //
+ // This can be in one of the following formats:
+ // project:<project_id>,
+ // project_number:<project_number>,
+ // api_key:<api_key>.
+ string consumer_id = 3;
+
+ // Required. Start time of the operation.
+ google.protobuf.Timestamp start_time = 4;
+
+ // End time of the operation.
+ // Required when the operation is used in [ServiceController.Report][google.api.servicecontrol.v1.ServiceController.Report],
+ // but optional when the operation is used in [ServiceController.Check][google.api.servicecontrol.v1.ServiceController.Check].
+ google.protobuf.Timestamp end_time = 5;
+
+ // Labels describing the operation. Only the following labels are allowed:
+ //
+ // - Labels describing monitored resources as defined in
+ // the service configuration.
+ // - Default labels of metric values. When specified, labels defined in the
+  //   metric value override these defaults.
+ // - The following labels defined by Google Cloud Platform:
+ // - `cloud.googleapis.com/location` describing the location where the
+ // operation happened,
+ // - `servicecontrol.googleapis.com/user_agent` describing the user agent
+ // of the API request,
+ // - `servicecontrol.googleapis.com/service_agent` describing the service
+ // used to handle the API request (e.g. ESP),
+ // - `servicecontrol.googleapis.com/platform` describing the platform
+ // where the API is served (e.g. GAE, GCE, GKE).
+ map<string, string> labels = 6;
+
+ // Represents information about this operation. Each MetricValueSet
+ // corresponds to a metric defined in the service configuration.
+ // The data type used in the MetricValueSet must agree with
+ // the data type specified in the metric definition.
+ //
+  // Within a single operation, there must not be more than one MetricValue
+  // instance with the same metric name and identical label value
+  // combination. If a request has such duplicated MetricValue
+ // instances, the entire request is rejected with
+ // an invalid argument error.
+ repeated MetricValueSet metric_value_sets = 7;
+
+ // Represents information to be logged.
+ repeated LogEntry log_entries = 8;
+
+ // DO NOT USE. This is an experimental field.
+ Importance importance = 11;
+}
diff --git a/third_party/googleapis/google/api/servicecontrol/v1/service_controller.proto b/third_party/googleapis/google/api/servicecontrol/v1/service_controller.proto
new file mode 100644
index 0000000000..2d0ec8e9fd
--- /dev/null
+++ b/third_party/googleapis/google/api/servicecontrol/v1/service_controller.proto
@@ -0,0 +1,161 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicecontrol.v1;
+
+import "google/api/annotations.proto";
+import "google/api/servicecontrol/v1/check_error.proto";
+import "google/api/servicecontrol/v1/operation.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol";
+option java_multiple_files = true;
+option java_outer_classname = "ServiceControllerProto";
+option java_package = "com.google.api.servicecontrol.v1";
+option objc_class_prefix = "GASC";
+
+
+// [Google Service Control API](/service-control/overview)
+//
+// Lets clients check and report operations against
+// a [managed service][google.api.servicemanagement.v1.ManagedService].
+service ServiceController {
+ // Checks an operation with Google Service Control to decide whether
+ // the given operation should proceed. It should be called before the
+ // operation is executed.
+ //
+ // If feasible, the client should cache the check results and reuse them for
+ // up to 60s. In case of server errors, the client may rely on the cached
+  // results for a longer time.
+ //
+ // This method requires the `servicemanagement.services.check` permission
+ // on the specified service. For more information, see
+ // [Google Cloud IAM](https://cloud.google.com/iam).
+ rpc Check(CheckRequest) returns (CheckResponse) {
+ option (google.api.http) = { post: "/v1/services/{service_name}:check" body: "*" };
+ }
+
+ // Reports operations to Google Service Control. It should be called
+ // after the operation is completed.
+ //
+ // If feasible, the client should aggregate reporting data for up to 5s to
+  // reduce API traffic. The 5s limit on aggregation reduces data loss
+  // during client crashes. Clients should carefully choose the aggregation
+  // window to keep the risk of data loss below 0.01%, for business and
+  // compliance reasons.
+ //
+ // This method requires the `servicemanagement.services.report` permission
+ // on the specified service. For more information, see
+ // [Google Cloud IAM](https://cloud.google.com/iam).
+ rpc Report(ReportRequest) returns (ReportResponse) {
+ option (google.api.http) = { post: "/v1/services/{service_name}:report" body: "*" };
+ }
+}
+
+// Request message for the Check method.
+message CheckRequest {
+ // The service name as specified in its service configuration. For example,
+ // `"pubsub.googleapis.com"`.
+ //
+ // See [google.api.Service][google.api.Service] for the definition of a service name.
+ string service_name = 1;
+
+ // The operation to be checked.
+ Operation operation = 2;
+
+ // Specifies which version of service configuration should be used to process
+ // the request.
+ //
+ // If unspecified or no matching version can be found, the
+ // latest one will be used.
+ string service_config_id = 4;
+}
+
+// Response message for the Check method.
+message CheckResponse {
+ // The same operation_id value used in the CheckRequest.
+ // Used for logging and diagnostics purposes.
+ string operation_id = 1;
+
+  // Indicates the decision of the check.
+ //
+ // If no check errors are present, the service should process the operation.
+ // Otherwise the service should use the list of errors to determine the
+ // appropriate action.
+ repeated CheckError check_errors = 2;
+
+ // The actual config id used to process the request.
+ string service_config_id = 5;
+}
+
+// Request message for the Report method.
+message ReportRequest {
+ // The service name as specified in its service configuration. For example,
+ // `"pubsub.googleapis.com"`.
+ //
+ // See [google.api.Service][google.api.Service] for the definition of a service name.
+ string service_name = 1;
+
+ // Operations to be reported.
+ //
+ // Typically the service should report one operation per request.
+ // Putting multiple operations into a single request is allowed, but should
+  // be used only when multiple operations are naturally available at the time
+ // of the report.
+ //
+ // If multiple operations are in a single request, the total request size
+ // should be no larger than 1MB. See [ReportResponse.report_errors][google.api.servicecontrol.v1.ReportResponse.report_errors] for
+ // partial failure behavior.
+ repeated Operation operations = 2;
+
+ // Specifies which version of service config should be used to process the
+ // request.
+ //
+ // If unspecified or no matching version can be found, the
+ // latest one will be used.
+ string service_config_id = 3;
+}
+
+// Response message for the Report method.
+message ReportResponse {
+ // Represents the processing error of one `Operation` in the request.
+ message ReportError {
+ // The [Operation.operation_id][google.api.servicecontrol.v1.Operation.operation_id] value from the request.
+ string operation_id = 1;
+
+ // Details of the error when processing the `Operation`.
+ google.rpc.Status status = 2;
+ }
+
+ // Partial failures, one for each `Operation` in the request that failed
+ // processing. There are three possible combinations of the RPC status:
+ //
+ // 1. The combination of a successful RPC status and an empty `report_errors`
+ // list indicates a complete success where all `Operations` in the
+ // request are processed successfully.
+ // 2. The combination of a successful RPC status and a non-empty
+ // `report_errors` list indicates a partial success where some
+ // `Operations` in the request succeeded. Each
+ // `Operation` that failed processing has a corresponding item
+ // in this list.
+ // 3. A failed RPC status indicates a complete failure where none of the
+ // `Operations` in the request succeeded.
+ repeated ReportError report_errors = 1;
+
+ // The actual config id used to process the request.
+ string service_config_id = 2;
+}
diff --git a/third_party/googleapis/google/api/servicemanagement/README.md b/third_party/googleapis/google/api/servicemanagement/README.md
new file mode 100644
index 0000000000..e3e36df498
--- /dev/null
+++ b/third_party/googleapis/google/api/servicemanagement/README.md
@@ -0,0 +1,102 @@
+Google Service Management manages a set of *services*. Service
+Management allows *service producers* to
+publish their services on Google Cloud Platform so that they can be discovered
+and used by *service consumers*. It also handles the tasks of tracking
+service lifecycle and programming various backend systems -- such as
+[Stackdriver Logging](https://cloud.google.com/stackdriver) and
+[Stackdriver Monitoring](https://cloud.google.com/stackdriver) -- to support
+the managed services.
+
+If you are a service producer, you can use the Google Service Management API
+and [Google Cloud SDK (gcloud)](/sdk) to publish and manage your services.
+Each managed service has a service configuration which declares various aspects
+of the service such as its API surface, along with parameters to configure the
+supporting backend
+systems, such as logging and monitoring. If you build your service using
+[Google Cloud Endpoints](https://cloud.google.com/endpoints/), the service
+configuration will be handled automatically.
+
+If you are a service consumer and want to use a managed service, you can use the
+Google Service Management API or [Google Cloud Console](https://console.cloud.google.com)
+to activate the
+service for your [Google developer project](https://developers.google.com/console/help/new/),
+then start using its APIs and functions.
+
+## Managed services
+
+REST URL: `https://servicemanagement.googleapis.com/v1/services/{service-name}` <br />
+REST schema is defined [here](/service-management/reference/rest/v1/services).
+
+A managed service refers to a network service managed by
+Service Management. Each managed service has a unique name, such as
+`example.googleapis.com`, which must be a valid fully-qualified DNS name, as per
+RFC 1035.
+
+A managed service typically provides some REST APIs and/or other
+functions to its service consumers, such as mobile apps or cloud services.
+
+Service producers can use methods, such as
+[services.create](/service-management/reference/rest/v1/services/create),
+[services.delete](/service-management/reference/rest/v1/services/delete),
+[services.undelete](/service-management/reference/rest/v1/services/undelete),
+to manipulate their managed services.
+
+## Service producers
+
+A service producer is the Google developer project responsible for publishing
+and maintaining a managed service. Each managed service is owned by exactly one
+service producer.
+
+## Service consumers
+
+A service consumer is a Google developer project that has enabled and can
+invoke APIs on a managed service. A managed service can have many service
+consumers.
+
+## Service configuration
+
+REST URL: `https://servicemanagement.googleapis.com/v1/services/{service-name}/configs/{config_id}` <br />
+REST schema is defined [here](/service-management/reference/rest/v1/services.configs).
+
+Each managed service is described by a service configuration which covers a wide
+range of features, including its name, title, RPC API definitions,
+REST API definitions, documentation, authentication, and more.
+
+To change the configuration of a managed service, the service producer needs to
+publish an updated service configuration to Service Management.
+Service Management keeps a history of published
+service configurations, making it possible to easily retrace how a service's
+configuration evolved over time. Service configurations can be published using
+the
+[services.configs.create](/service-management/reference/rest/v1/services.configs/create)
+or [services.configs.submit](/service-management/reference/rest/v1/services.configs/submit)
+methods.
+
+Alternatively, `services.configs.submit` allows publishing an
+[OpenAPI](https://github.com/OAI/OpenAPI-Specification) specification, formerly
+known as the Swagger Specification, which is automatically converted to a
+corresponding service configuration.
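+
+For illustration, the core of the example configuration shown in
+`google/api/service.proto` could also be constructed with the generated Java
+classes (a minimal sketch assuming the standard protobuf-java builders; the
+names are the same placeholders used in that example):
+
+    import com.google.api.Service;
+    import com.google.protobuf.Api;
+    import com.google.protobuf.UInt32Value;
+
+    public final class ServiceConfigSketch {
+      public static Service build() {
+        // Mirrors the name/title/apis portion of the service.proto example.
+        return Service.newBuilder()
+            .setConfigVersion(UInt32Value.newBuilder().setValue(3))
+            .setName("calendar.googleapis.com")
+            .setTitle("Google Calendar API")
+            .addApis(Api.newBuilder().setName("google.calendar.v3.Calendar"))
+            .build();
+      }
+    }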
+
+## Service rollout
+
+REST URL: `https://servicemanagement.googleapis.com/v1/services/{service-name}/rollouts/{rollout-id}` <br />
+REST schema is defined [here](/service-management/reference/rest/v1/services.rollouts).
+
+A `Rollout` defines how Google Service Management should deploy service
+configurations to backend systems and how the configurations take effect at
+runtime. It lets service producers specify multiple service configuration
+versions to be deployed together, and a strategy that indicates how they
+should be used.
+
+Updating a managed service's configuration can be dangerous, as a configuration
+error can lead to a service outage. To mitigate risks, Service Management
+supports gradual rollout of service configuration changes. This feature gives
+service producers time to identify potential issues and roll back service
+configuration changes in case of errors, thus minimizing the customer
+impact of bad configurations. For example, you could specify that 5% of traffic
+uses configuration 1, while the remaining 95% uses configuration 2.
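+
+As an illustrative sketch only (placeholder token; the configuration IDs follow
+the example format used in this package), creating such a 5% / 95% rollout over
+REST could look like:
+
+```python
+import requests
+
+ACCESS_TOKEN = "ya29.example-access-token"  # placeholder OAuth 2.0 token
+SERVICE_NAME = "example.googleapis.com"
+
+rollout = {
+    "trafficPercentStrategy": {
+        "percentages": {
+            "example.googleapis.com/20160201": 5.0,   # configuration 1
+            "example.googleapis.com/20160206": 95.0,  # configuration 2
+        }
+    }
+}
+resp = requests.post(
+    f"https://servicemanagement.googleapis.com/v1/services/{SERVICE_NAME}/rollouts",
+    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
+    json=rollout,
+)
+resp.raise_for_status()
+print(resp.json())  # a long-running Operation whose response is the Rollout
+```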
+
+Service Management keeps a history of rollouts so that service
+producers can roll back to previous configuration versions. You can roll back a
+configuration by initiating a new `Rollout` that clones a previously submitted
+rollout record. \ No newline at end of file
diff --git a/third_party/googleapis/google/api/servicemanagement/v1/resources.proto b/third_party/googleapis/google/api/servicemanagement/v1/resources.proto
new file mode 100644
index 0000000000..0d0f34d62e
--- /dev/null
+++ b/third_party/googleapis/google/api/servicemanagement/v1/resources.proto
@@ -0,0 +1,286 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicemanagement.v1;
+
+import "google/api/annotations.proto";
+import "google/api/config_change.proto";
+import "google/api/service.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/servicemanagement/v1;servicemanagement";
+option java_multiple_files = true;
+option java_outer_classname = "ResourcesProto";
+option java_package = "com.google.api.servicemanagement.v1";
+option objc_class_prefix = "GASM";
+
+
+// The full representation of a Service that is managed by
+// Google Service Management.
+message ManagedService {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements.
+ string service_name = 2;
+
+ // ID of the project that produces and owns this service.
+ string producer_project_id = 3;
+}
+
+// The metadata associated with a long running operation resource.
+message OperationMetadata {
+ // Represents the status of one operation step.
+ message Step {
+ // The short description of the step.
+ string description = 2;
+
+ // The status code.
+ Status status = 4;
+ }
+
+ // Code describes the status of one operation step.
+ enum Status {
+ // Unspecified code.
+ STATUS_UNSPECIFIED = 0;
+
+ // The step has completed without errors.
+ DONE = 1;
+
+ // The step has not started yet.
+ NOT_STARTED = 2;
+
+ // The step is in progress.
+ IN_PROGRESS = 3;
+
+ // The step has completed with errors.
+ FAILED = 4;
+
+ // The step has completed with cancellation.
+ CANCELLED = 5;
+ }
+
+ // The full name of the resources that this operation is directly
+ // associated with.
+ repeated string resource_names = 1;
+
+ // Detailed status information for each step. The order is undetermined.
+ repeated Step steps = 2;
+
+ // Percentage of completion of this operation, ranging from 0 to 100.
+ int32 progress_percentage = 3;
+
+ // The start time of the operation.
+ google.protobuf.Timestamp start_time = 4;
+}
+
+// Represents a diagnostic message (error or warning)
+message Diagnostic {
+ // The kind of diagnostic information possible.
+ enum Kind {
+ // Warnings and errors
+ WARNING = 0;
+
+ // Only errors
+ ERROR = 1;
+ }
+
+ // File name and line number of the error or warning.
+ string location = 1;
+
+ // The kind of diagnostic information provided.
+ Kind kind = 2;
+
+ // Message describing the error or warning.
+ string message = 3;
+}
+
+// Represents a source file which is used to generate the service configuration
+// defined by `google.api.Service`.
+message ConfigSource {
+ // A unique ID for a specific instance of this message, typically assigned
+ // by the client for tracking purposes. If empty, the server may choose to
+ // generate one instead.
+ string id = 5;
+
+ // Set of source configuration files that are used to generate a service
+ // configuration (`google.api.Service`).
+ repeated ConfigFile files = 2;
+}
+
+// Generic specification of a source configuration file
+message ConfigFile {
+ enum FileType {
+ // Unknown file type.
+ FILE_TYPE_UNSPECIFIED = 0;
+
+ // YAML-specification of service.
+ SERVICE_CONFIG_YAML = 1;
+
+ // OpenAPI specification, serialized in JSON.
+ OPEN_API_JSON = 2;
+
+ // OpenAPI specification, serialized in YAML.
+ OPEN_API_YAML = 3;
+
+ // FileDescriptorSet, generated by protoc.
+ //
+ // To generate, use protoc with imports and source info included.
+ // For an example test.proto file, the following command would put the value
+ // in a new file named out.pb.
+ //
+ // $ protoc --include_imports --include_source_info test.proto -o out.pb
+ FILE_DESCRIPTOR_SET_PROTO = 4;
+ }
+
+ // The file name of the configuration file (full or relative path).
+ string file_path = 1;
+
+ // The bytes that constitute the file.
+ bytes file_contents = 3;
+
+ // The type of configuration file this represents.
+ FileType file_type = 4;
+}
+
+// Represents a service configuration with its name and id.
+message ConfigRef {
+ // Resource name of a service config. It must have the following
+ // format: "services/{service name}/configs/{config id}".
+ string name = 1;
+}
+
+// Change report associated with a particular service configuration.
+//
+// It contains a list of ConfigChanges based on the comparison between
+// two service configurations.
+message ChangeReport {
+ // List of changes between two service configurations.
+ // The changes will be alphabetically sorted based on the identifier
+ // of each change.
+ // A ConfigChange identifier is a dot separated path to the configuration.
+ // Example: visibility.rules[selector='LibraryService.CreateBook'].restriction
+ repeated google.api.ConfigChange config_changes = 1;
+}
+
+// A rollout resource that defines how service configuration versions are pushed
+// to control plane systems. Typically, you create a new version of the
+// service config, and then create a Rollout to push the service config.
+message Rollout {
+ // Strategy that specifies how Google Service Control should select different
+ // versions of service configurations based on traffic percentage.
+ //
+ // One example of how to gradually roll out a new service configuration using
+ // this strategy:
+ // Day 1
+ //
+ // Rollout {
+ // id: "example.googleapis.com/rollout_20160206"
+ // traffic_percent_strategy {
+ // percentages: {
+ // "example.googleapis.com/20160201": 70.00
+ // "example.googleapis.com/20160206": 30.00
+ // }
+ // }
+ // }
+ //
+ // Day 2
+ //
+ // Rollout {
+ // id: "example.googleapis.com/rollout_20160207"
+ // traffic_percent_strategy: {
+ // percentages: {
+ // "example.googleapis.com/20160206": 100.00
+ // }
+ // }
+ // }
+ message TrafficPercentStrategy {
+ // Maps service configuration IDs to their corresponding traffic percentage.
+ // Key is the service configuration ID, value is the traffic percentage,
+ // which must be greater than 0.0, and the percentages must sum to 100.0.
+ map<string, double> percentages = 1;
+ }
+
+ // Strategy used to delete a service. This strategy is a placeholder only
+ // used by the system generated rollout to delete a service.
+ message DeleteServiceStrategy {
+
+ }
+
+ // Status of a Rollout.
+ enum RolloutStatus {
+ // No status specified.
+ ROLLOUT_STATUS_UNSPECIFIED = 0;
+
+ // The Rollout is in progress.
+ IN_PROGRESS = 1;
+
+ // The Rollout has completed successfully.
+ SUCCESS = 2;
+
+ // The Rollout has been cancelled. This can happen if you have overlapping
+ // Rollout pushes; the earlier ones are cancelled.
+ CANCELLED = 3;
+
+ // The Rollout has failed. It is typically caused by configuration errors.
+ FAILED = 4;
+
+ // The Rollout has not started yet and is pending execution.
+ PENDING = 5;
+ }
+
+ // Optional unique identifier of this Rollout. Only lowercase letters, digits,
+ // and '-' are allowed.
+ //
+ // If not specified by the client, the server will generate one. The generated id
+ // will have the form of <date><revision number>, where "date" is the create
+ // date in ISO 8601 format. "revision number" is a monotonically increasing
+ // positive number that is reset every day for each service.
+ // An example of a generated rollout_id is '2016-02-16r1'.
+ string rollout_id = 1;
+
+ // Creation time of the rollout. Readonly.
+ google.protobuf.Timestamp create_time = 2;
+
+ // The user who created the Rollout. Readonly.
+ string created_by = 3;
+
+ // The status of this rollout. Readonly. In case of a failed rollout,
+ // the system will automatically roll back to the current Rollout
+ // version.
+ RolloutStatus status = 4;
+
+ // Strategy that defines which versions of service configurations should be
+ // pushed and how they should be used at runtime.
+ oneof strategy {
+ // Google Service Control selects service configurations based on
+ // traffic percentage.
+ TrafficPercentStrategy traffic_percent_strategy = 5;
+
+ // The strategy associated with a rollout to delete a `ManagedService`.
+ // Readonly.
+ DeleteServiceStrategy delete_service_strategy = 200;
+ }
+
+ // The name of the service associated with this Rollout.
+ string service_name = 8;
+}
diff --git a/third_party/googleapis/google/api/servicemanagement/v1/servicemanager.proto b/third_party/googleapis/google/api/servicemanagement/v1/servicemanager.proto
new file mode 100644
index 0000000000..62af1893ec
--- /dev/null
+++ b/third_party/googleapis/google/api/servicemanagement/v1/servicemanager.proto
@@ -0,0 +1,392 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api.servicemanagement.v1;
+
+import "google/api/annotations.proto";
+import "google/api/service.proto";
+import "google/api/servicemanagement/v1/resources.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/struct.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/servicemanagement/v1;servicemanagement";
+option java_multiple_files = true;
+option java_outer_classname = "ServiceManagerProto";
+option java_package = "com.google.api.servicemanagement.v1";
+option objc_class_prefix = "GASM";
+
+
+// [Google Service Management API](/service-management/overview)
+service ServiceManager {
+ // Lists all managed services.
+ rpc ListServices(ListServicesRequest) returns (ListServicesResponse) {
+ option (google.api.http) = { get: "/v1/services" };
+ }
+
+ // Gets a managed service.
+ rpc GetService(GetServiceRequest) returns (ManagedService) {
+ option (google.api.http) = { get: "/v1/services/{service_name}" };
+ }
+
+ // Creates a new managed service.
+ // Please note one producer project can own no more than 20 services.
+ //
+ // Operation<response: ManagedService>
+ rpc CreateService(CreateServiceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/services" body: "service" };
+ }
+
+ // Deletes a managed service. This method will move the service into the
+ // `Soft-Delete` state for 30 days. Within this period, service producers may
+ // call [UndeleteService][google.api.servicemanagement.v1.ServiceManager.UndeleteService] to restore the service.
+ // After 30 days, the service will be permanently deleted.
+ //
+ // Operation<response: google.protobuf.Empty>
+ rpc DeleteService(DeleteServiceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1/services/{service_name}" };
+ }
+
+ // Revives a previously deleted managed service. The method restores the
+ // service using the configuration at the time the service was deleted.
+ // The target service must exist and must have been deleted within the
+ // last 30 days.
+ //
+ // Operation<response: UndeleteServiceResponse>
+ rpc UndeleteService(UndeleteServiceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/services/{service_name}:undelete" body: "" };
+ }
+
+ // Lists the history of the service configuration for a managed service,
+ // from the newest to the oldest.
+ rpc ListServiceConfigs(ListServiceConfigsRequest) returns (ListServiceConfigsResponse) {
+ option (google.api.http) = { get: "/v1/services/{service_name}/configs" };
+ }
+
+ // Gets a service configuration (version) for a managed service.
+ rpc GetServiceConfig(GetServiceConfigRequest) returns (google.api.Service) {
+ option (google.api.http) = { get: "/v1/services/{service_name}/configs/{config_id}" };
+ }
+
+ // Creates a new service configuration (version) for a managed service.
+ // This method only stores the service configuration. To roll out the service
+ // configuration to backend systems please call
+ // [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout].
+ rpc CreateServiceConfig(CreateServiceConfigRequest) returns (google.api.Service) {
+ option (google.api.http) = { post: "/v1/services/{service_name}/configs" body: "service_config" };
+ }
+
+ // Creates a new service configuration (version) for a managed service based
+ // on user-supplied configuration source files (for example: OpenAPI
+ // Specification). This method stores the source configurations as well as the
+ // generated service configuration. To roll out the service configuration to
+ // other services, please call
+ // [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout].
+ //
+ // Operation<response: SubmitConfigSourceResponse>
+ rpc SubmitConfigSource(SubmitConfigSourceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/services/{service_name}/configs:submit" body: "*" };
+ }
+
+ // Lists the history of the service configuration rollouts for a managed
+ // service, from the newest to the oldest.
+ rpc ListServiceRollouts(ListServiceRolloutsRequest) returns (ListServiceRolloutsResponse) {
+ option (google.api.http) = { get: "/v1/services/{service_name}/rollouts" };
+ }
+
+ // Gets a service configuration [rollout][google.api.servicemanagement.v1.Rollout].
+ rpc GetServiceRollout(GetServiceRolloutRequest) returns (Rollout) {
+ option (google.api.http) = { get: "/v1/services/{service_name}/rollouts/{rollout_id}" };
+ }
+
+ // Creates a new service configuration rollout. Based on the rollout,
+ // Google Service Management will roll out the service configurations to
+ // different backend services. For example, the logging configuration will be
+ // pushed to Google Cloud Logging.
+ //
+ // Please note that any previous pending and running Rollouts and associated
+ // Operations will be automatically cancelled so that the latest Rollout will
+ // not be blocked by previous Rollouts.
+ //
+ // Operation<response: Rollout>
+ rpc CreateServiceRollout(CreateServiceRolloutRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/services/{service_name}/rollouts" body: "rollout" };
+ }
+
+ // Generates and returns a report (errors, warnings, and changes from
+ // existing configurations) associated with
+ // GenerateConfigReportRequest.new_value.
+ //
+ // If GenerateConfigReportRequest.old_value is specified,
+ // the GenerateConfigReportResponse will contain a single ChangeReport based on
+ // the comparison between GenerateConfigReportRequest.new_value and
+ // GenerateConfigReportRequest.old_value.
+ // If GenerateConfigReportRequest.old_value is not specified, this method
+ // will compare GenerateConfigReportRequest.new_value with the last pushed
+ // service configuration.
+ rpc GenerateConfigReport(GenerateConfigReportRequest) returns (GenerateConfigReportResponse) {
+ option (google.api.http) = { post: "/v1/services:generateConfigReport" body: "*" };
+ }
+
+ // Enables a managed service for a project with default settings.
+ //
+ // Operation<response: EnableServiceResponse>
+ //
+ // [google.rpc.Status][google.rpc.Status] errors may contain a
+ // [google.rpc.PreconditionFailure][] error detail.
+ rpc EnableService(EnableServiceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/services/{service_name}:enable" body: "*" };
+ }
+
+ // Disables a managed service for a project.
+ //
+ // Operation<response: DisableServiceResponse>
+ rpc DisableService(DisableServiceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/services/{service_name}:disable" body: "*" };
+ }
+}
+
+// Request message for `ListServices` method.
+message ListServicesRequest {
+ // Include services produced by the specified project.
+ string producer_project_id = 1;
+
+ // Requested size of the next page of data.
+ int32 page_size = 5;
+
+ // Token identifying which result to start with; returned by a previous list
+ // call.
+ string page_token = 6;
+}
+
+// Response message for `ListServices` method.
+message ListServicesResponse {
+ // The results of the query.
+ repeated ManagedService services = 1;
+
+ // Token that can be passed to `ListServices` to resume a paginated query.
+ string next_page_token = 2;
+}
+
+// Request message for `GetService` method.
+message GetServiceRequest {
+ // The name of the service. See the `ServiceManager` overview for naming
+ // requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+}
+
+// Request message for CreateService method.
+message CreateServiceRequest {
+ // Initial values for the service resource.
+ ManagedService service = 1;
+}
+
+// Request message for DeleteService method.
+message DeleteServiceRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+}
+
+// Request message for UndeleteService method.
+message UndeleteServiceRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+}
+
+// Response message for UndeleteService method.
+message UndeleteServiceResponse {
+ // Revived service resource.
+ ManagedService service = 1;
+}
+
+// Request message for GetServiceConfig method.
+message GetServiceConfigRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+
+ // The id of the service configuration resource.
+ string config_id = 2;
+}
+
+// Request message for ListServiceConfigs method.
+message ListServiceConfigsRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+
+ // The token of the page to retrieve.
+ string page_token = 2;
+
+ // The max number of items to include in the response list.
+ int32 page_size = 3;
+}
+
+// Response message for ListServiceConfigs method.
+message ListServiceConfigsResponse {
+ // The list of service configuration resources.
+ repeated google.api.Service service_configs = 1;
+
+ // The token of the next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for CreateServiceConfig method.
+message CreateServiceConfigRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+
+ // The service configuration resource.
+ google.api.Service service_config = 2;
+}
+
+// Request message for SubmitConfigSource method.
+message SubmitConfigSourceRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+
+ // The source configuration for the service.
+ ConfigSource config_source = 2;
+
+ // Optional. If set, this will result in the generation of a
+ // `google.api.Service` configuration based on the `ConfigSource` provided,
+ // but the generated config and the sources will NOT be persisted.
+ bool validate_only = 3;
+}
+
+// Response message for SubmitConfigSource method.
+message SubmitConfigSourceResponse {
+ // The generated service configuration.
+ google.api.Service service_config = 1;
+}
+
+// Request message for 'CreateServiceRollout'
+message CreateServiceRolloutRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+
+ // The rollout resource. The `service_name` field is output only.
+ Rollout rollout = 2;
+}
+
+// Request message for 'ListServiceRollouts'
+message ListServiceRolloutsRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+
+ // The token of the page to retrieve.
+ string page_token = 2;
+
+ // The max number of items to include in the response list.
+ int32 page_size = 3;
+}
+
+// Response message for ListServiceRollouts method.
+message ListServiceRolloutsResponse {
+ // The list of rollout resources.
+ repeated Rollout rollouts = 1;
+
+ // The token of the next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for GetServiceRollout method.
+message GetServiceRolloutRequest {
+ // The name of the service. See the [overview](/service-management/overview)
+ // for naming requirements. For example: `example.googleapis.com`.
+ string service_name = 1;
+
+ // The id of the rollout resource.
+ string rollout_id = 2;
+}
+
+// Request message for EnableService method.
+message EnableServiceRequest {
+ // Name of the service to enable. Specifying an unknown service name will
+ // cause the request to fail.
+ string service_name = 1;
+
+ // The identity of the consumer resource to which service enablement will be
+ // applied.
+ //
+ // The Google Service Management implementation accepts the following
+ // forms: "project:<project_id>", "project_number:<project_number>".
+ //
+ // Note: this is made compatible with
+ // google.api.servicecontrol.v1.Operation.consumer_id.
+ string consumer_id = 2;
+}
+
+// Request message for DisableService method.
+message DisableServiceRequest {
+ // Name of the service to disable. Specifying an unknown service name
+ // will cause the request to fail.
+ string service_name = 1;
+
+ // The identity of the consumer resource to which service disablement will be
+ // applied.
+ //
+ // The Google Service Management implementation accepts the following
+ // forms: "project:<project_id>", "project_number:<project_number>".
+ //
+ // Note: this is made compatible with
+ // google.api.servicecontrol.v1.Operation.consumer_id.
+ string consumer_id = 2;
+}
+
+// Request message for GenerateConfigReport method.
+message GenerateConfigReportRequest {
+ // Service configuration for which we want to generate the report.
+ // For this version of the API, the supported types are
+ // [google.api.servicemanagement.v1.ConfigRef][google.api.servicemanagement.v1.ConfigRef],
+ // [google.api.servicemanagement.v1.ConfigSource][google.api.servicemanagement.v1.ConfigSource],
+ // and [google.api.Service][google.api.Service].
+ google.protobuf.Any new_config = 1;
+
+ // Service configuration against which the comparison will be done.
+ // For this version of the API, the supported types are
+ // [google.api.servicemanagement.v1.ConfigRef][google.api.servicemanagement.v1.ConfigRef],
+ // [google.api.servicemanagement.v1.ConfigSource][google.api.servicemanagement.v1.ConfigSource],
+ // and [google.api.Service][google.api.Service].
+ google.protobuf.Any old_config = 2;
+}
+
+// Response message for GenerateConfigReport method.
+message GenerateConfigReportResponse {
+ // Name of the service this report belongs to.
+ string service_name = 1;
+
+ // ID of the service configuration this report belongs to.
+ string id = 2;
+
+ // List of ChangeReport, each corresponding to a comparison between two
+ // service configurations.
+ repeated ChangeReport change_reports = 3;
+
+ // Errors / linter warnings associated with the service definition this
+ // report belongs to.
+ repeated Diagnostic diagnostics = 4;
+}
diff --git a/third_party/googleapis/google/api/source_info.proto b/third_party/googleapis/google/api/source_info.proto
new file mode 100644
index 0000000000..5d0f7bd721
--- /dev/null
+++ b/third_party/googleapis/google/api/source_info.proto
@@ -0,0 +1,32 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/protobuf/any.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "SourceInfoProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Source information used to create a Service Config
+message SourceInfo {
+ // All files used during config generation.
+ repeated google.protobuf.Any source_files = 1;
+}
diff --git a/third_party/googleapis/google/api/system_parameter.proto b/third_party/googleapis/google/api/system_parameter.proto
new file mode 100644
index 0000000000..ed36a3d0bc
--- /dev/null
+++ b/third_party/googleapis/google/api/system_parameter.proto
@@ -0,0 +1,96 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "SystemParameterProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// ### System parameter configuration
+//
+// A system parameter is a special kind of parameter defined by the API
+// system, not by an individual API. It is typically mapped to an HTTP header
+// and/or a URL query parameter. This configuration specifies which methods
+// change the names of the system parameters.
+message SystemParameters {
+ // Define system parameters.
+ //
+ // The parameters defined here will override the default parameters
+ // implemented by the system. If this field is missing from the service
+ // config, default system parameters will be used. Default system parameters
+ // and names are implementation-dependent.
+ //
+ // Example: define api key for all methods
+ //
+ // system_parameters:
+ // rules:
+ // - selector: "*"
+ // parameters:
+ // - name: api_key
+ // url_query_parameter: api_key
+ //
+ //
+ // Example: define 2 api key names for a specific method.
+ //
+ // system_parameters:
+ // rules:
+ // - selector: "/ListShelves"
+ // parameters:
+ // - name: api_key
+ // http_header: Api-Key1
+ // - name: api_key
+ // http_header: Api-Key2
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated SystemParameterRule rules = 1;
+}
+
+// Define a system parameter rule mapping system parameter definitions to
+// methods.
+message SystemParameterRule {
+ // Selects the methods to which this rule applies. Use '*' to indicate all
+ // methods in all APIs.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // Define parameters. Multiple names may be defined for a parameter.
+ // For a given method call, only one of them should be used. If multiple
+ // names are used, the behavior is implementation-dependent.
+ // If none of the specified names are present, the behavior is
+ // parameter-dependent.
+ repeated SystemParameter parameters = 2;
+}
+
+// Define a parameter's name and location. The parameter may be passed as either
+// an HTTP header or a URL query parameter, and if both are passed the behavior
+// is implementation-dependent.
+message SystemParameter {
+ // Define the name of the parameter, such as "api_key". It is case sensitive.
+ string name = 1;
+
+ // Define the HTTP header name to use for the parameter. It is case
+ // insensitive.
+ string http_header = 2;
+
+ // Define the URL query parameter name to use for the parameter. It is case
+ // sensitive.
+ string url_query_parameter = 3;
+}
diff --git a/third_party/googleapis/google/api/usage.proto b/third_party/googleapis/google/api/usage.proto
new file mode 100644
index 0000000000..29c601382d
--- /dev/null
+++ b/third_party/googleapis/google/api/usage.proto
@@ -0,0 +1,85 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig";
+option java_multiple_files = true;
+option java_outer_classname = "UsageProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Configuration controlling usage of a service.
+message Usage {
+ // Requirements that must be satisfied before a consumer project can use the
+ // service. Each requirement is of the form <service.name>/<requirement-id>;
+ // for example 'serviceusage.googleapis.com/billing-enabled'.
+ repeated string requirements = 1;
+
+ // A list of usage rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated UsageRule rules = 6;
+
+ // The full resource name of a channel used for sending notifications to the
+ // service producer.
+ //
+ // Google Service Management currently only supports
+ // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification
+ // channel. To use Google Cloud Pub/Sub as the channel, this must be the name
+ // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format
+ // documented in https://cloud.google.com/pubsub/docs/overview.
+ string producer_notification_channel = 7;
+}
+
+// Usage configuration rules for the service.
+//
+// NOTE: Under development.
+//
+//
+// Use this rule to configure unregistered calls for the service. Unregistered
+// calls are calls that do not contain consumer project identity.
+// (Example: calls that do not contain an API key).
+// By default, API methods do not allow unregistered calls, and each method call
+// must be identified by a consumer project identity. Use this rule to
+// allow/disallow unregistered calls.
+//
+ // Example of an API that wants to allow unregistered calls for the entire
+ // service.
+//
+// usage:
+// rules:
+// - selector: "*"
+// allow_unregistered_calls: true
+//
+// Example of a method that wants to allow unregistered calls.
+//
+// usage:
+// rules:
+// - selector: "google.example.library.v1.LibraryService.CreateBook"
+// allow_unregistered_calls: true
+message UsageRule {
+ // Selects the methods to which this rule applies. Use '*' to indicate all
+ // methods in all APIs.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // True if the method allows unregistered calls; false otherwise.
+ bool allow_unregistered_calls = 2;
+}
diff --git a/third_party/googleapis/google/appengine/README.md b/third_party/googleapis/google/appengine/README.md
new file mode 100644
index 0000000000..54e45e6f8d
--- /dev/null
+++ b/third_party/googleapis/google/appengine/README.md
@@ -0,0 +1,12 @@
+# Google App Engine Admin API
+
+## Overview
+
+The Google App Engine Admin API is a RESTful API for managing App Engine
+applications. The Admin API provides programmatic access to several of the App
+Engine administrative operations that are found in the
+[Google Cloud Platform Console](https://cloud.google.com/appengine/docs/developers-console).
+
+## Documentation
+
+[Google App Engine Admin API Documentation](https://cloud.google.com/appengine/docs/admin-api/) \ No newline at end of file
diff --git a/third_party/googleapis/google/appengine/legacy/audit_data.proto b/third_party/googleapis/google/appengine/legacy/audit_data.proto
new file mode 100644
index 0000000000..75c2a9b208
--- /dev/null
+++ b/third_party/googleapis/google/appengine/legacy/audit_data.proto
@@ -0,0 +1,34 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.legacy;
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/legacy;legacy";
+option java_multiple_files = true;
+option java_outer_classname = "AuditDataProto";
+option java_package = "com.google.appengine.legacy";
+
+
+// Admin Console legacy audit log.
+message AuditData {
+ // Text description of the admin event.
+ // This is the "Event" column in Admin Console's Admin Logs.
+ string event_message = 1;
+
+ // Arbitrary event data.
+ // This is the "Result" column in Admin Console's Admin Logs.
+ map<string, string> event_data = 2;
+}
diff --git a/third_party/googleapis/google/appengine/logging/v1/request_log.proto b/third_party/googleapis/google/appengine/logging/v1/request_log.proto
new file mode 100644
index 0000000000..678ea9a5ec
--- /dev/null
+++ b/third_party/googleapis/google/appengine/logging/v1/request_log.proto
@@ -0,0 +1,190 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.logging.v1;
+
+import "google/logging/type/log_severity.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/logging/v1;logging";
+option java_multiple_files = true;
+option java_outer_classname = "RequestLogProto";
+option java_package = "com.google.appengine.logging.v1";
+
+
+// Application log line emitted while processing a request.
+message LogLine {
+ // Approximate time when this log entry was made.
+ google.protobuf.Timestamp time = 1;
+
+ // Severity of this log entry.
+ google.logging.type.LogSeverity severity = 2;
+
+ // App-provided log message.
+ string log_message = 3;
+
+ // Where in the source code this log message was written.
+ SourceLocation source_location = 4;
+}
+
+// Specifies a location in a source code file.
+message SourceLocation {
+ // Source file name. Depending on the runtime environment, this might be a
+ // simple name or a fully-qualified name.
+ string file = 1;
+
+ // Line within the source file.
+ int64 line = 2;
+
+ // Human-readable name of the function or method being invoked, with optional
+ // context such as the class or package name. This information is used in
+ // contexts such as the logs viewer, where a file and line number are less
+ // meaningful. The format can vary by language. For example:
+ // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function`
+ // (Python).
+ string function_name = 3;
+}
+
+// A reference to a particular snapshot of the source tree used to build and
+// deploy an application.
+message SourceReference {
+ // Optional. A URI string identifying the repository.
+ // Example: "https://github.com/GoogleCloudPlatform/kubernetes.git"
+ string repository = 1;
+
+ // The canonical and persistent identifier of the deployed revision.
+ // Example (git): "0035781c50ec7aa23385dc841529ce8a4b70db1b"
+ string revision_id = 2;
+}
+
+// Complete log information about a single HTTP request to an App Engine
+// application.
+message RequestLog {
+ // Application that handled this request.
+ string app_id = 1;
+
+ // Module of the application that handled this request.
+ string module_id = 37;
+
+ // Version of the application that handled this request.
+ string version_id = 2;
+
+ // Globally unique identifier for a request, which is based on the request
+ // start time. Request IDs for requests which started later will compare
+ // greater as strings than those for requests which started earlier.
+ string request_id = 3;
+
+ // Origin IP address.
+ string ip = 4;
+
+ // Time when the request started.
+ google.protobuf.Timestamp start_time = 6;
+
+ // Time when the request finished.
+ google.protobuf.Timestamp end_time = 7;
+
+ // Latency of the request.
+ google.protobuf.Duration latency = 8;
+
+ // Number of CPU megacycles used to process this request.
+ int64 mega_cycles = 9;
+
+ // Request method. Example: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`, `"DELETE"`.
+ string method = 10;
+
+ // Contains the path and query portion of the URL that was requested. For
+ // example, if the URL was "http://example.com/app?name=val", the resource
+ // would be "/app?name=val". The fragment identifier, which is identified by
+ // the `#` character, is not included.
+ string resource = 11;
+
+ // HTTP version of request. Example: `"HTTP/1.1"`.
+ string http_version = 12;
+
+ // HTTP response status code. Example: 200, 404.
+ int32 status = 13;
+
+ // Size in bytes sent back to the client by this request.
+ int64 response_size = 14;
+
+ // Referrer URL of request.
+ string referrer = 15;
+
+ // User agent that made the request.
+ string user_agent = 16;
+
+ // The logged-in user who made the request.
+ //
+ // Most likely, this is the part of the user's email before the `@` sign. The
+ // field value is the same for different requests from the same user, but
+ // different users can have similar names. This information is also
+ // available to the application via the App Engine Users API.
+ //
+ // This field will be populated starting with App Engine 1.9.21.
+ string nickname = 40;
+
+ // File or class that handled the request.
+ string url_map_entry = 17;
+
+ // Internet host and port number of the resource being requested.
+ string host = 20;
+
+ // An indication of the relative cost of serving this request.
+ double cost = 21;
+
+ // Queue name of the request, in the case of an offline request.
+ string task_queue_name = 22;
+
+ // Task name of the request, in the case of an offline request.
+ string task_name = 23;
+
+ // Whether this was a loading request for the instance.
+ bool was_loading_request = 24;
+
+ // Time this request spent in the pending request queue.
+ google.protobuf.Duration pending_time = 25;
+
+ // If the instance processing this request belongs to a manually scaled
+ // module, then this is the 0-based index of the instance. Otherwise, this
+ // value is -1.
+ int32 instance_index = 26;
+
+ // Whether this request is finished or active.
+ bool finished = 27;
+
+ // Whether this is the first `RequestLog` entry for this request. If an
+ // active request has several `RequestLog` entries written to Stackdriver
+ // Logging, then this field will be set for one of them.
+ bool first = 42;
+
+ // An identifier for the instance that handled the request.
+ string instance_id = 28;
+
+ // A list of log lines emitted by the application while serving this request.
+ repeated LogLine line = 29;
+
+ // App Engine release version.
+ string app_engine_release = 38;
+
+ // Stackdriver Trace identifier for this request.
+ string trace_id = 39;
+
+ // Source code for the application that handled this request. There can be
+ // more than one source reference per deployed application if source code is
+ // distributed among multiple repositories.
+ repeated SourceReference source_reference = 41;
+}
diff --git a/third_party/googleapis/google/appengine/v1/app_yaml.proto b/third_party/googleapis/google/appengine/v1/app_yaml.proto
new file mode 100644
index 0000000000..2892ef30d6
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/app_yaml.proto
@@ -0,0 +1,285 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "AppYamlProto";
+option java_package = "com.google.appengine.v1";
+
+
+// [Google Cloud Endpoints](https://cloud.google.com/appengine/docs/python/endpoints/)
+// configuration for API handlers.
+message ApiConfigHandler {
+ // Action to take when users access resources that require
+ // authentication. Defaults to `redirect`.
+ AuthFailAction auth_fail_action = 1;
+
+ // Level of login required to access this resource. Defaults to
+ // `optional`.
+ LoginRequirement login = 2;
+
+ // Path to the script from the application root directory.
+ string script = 3;
+
+ // Security (HTTPS) enforcement for this URL.
+ SecurityLevel security_level = 4;
+
+ // URL to serve the endpoint at.
+ string url = 5;
+}
+
+// Custom static error page to be served when an error occurs.
+message ErrorHandler {
+ // Error codes.
+ enum ErrorCode {
+ option allow_alias = true;
+ // Not specified. ERROR_CODE_DEFAULT is assumed.
+ ERROR_CODE_UNSPECIFIED = 0;
+
+ // All other error types.
+ ERROR_CODE_DEFAULT = 0;
+
+ // Application has exceeded a resource quota.
+ ERROR_CODE_OVER_QUOTA = 1;
+
+ // Client blocked by the application's Denial of Service protection
+ // configuration.
+ ERROR_CODE_DOS_API_DENIAL = 2;
+
+ // Deadline reached before the application responds.
+ ERROR_CODE_TIMEOUT = 3;
+ }
+
+ // Error condition this handler applies to.
+ ErrorCode error_code = 1;
+
+ // Static file content to be served for this error.
+ string static_file = 2;
+
+ // MIME type of file. Defaults to `text/html`.
+ string mime_type = 3;
+}
+
+// URL pattern and description of how the URL should be handled. App Engine can
+// handle URLs by executing application code or by serving static files
+// uploaded with the version, such as images, CSS, or JavaScript.
+message UrlMap {
+ // Redirect codes.
+ enum RedirectHttpResponseCode {
+ // Not specified. `302` is assumed.
+ REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED = 0;
+
+ // `301 Moved Permanently` code.
+ REDIRECT_HTTP_RESPONSE_CODE_301 = 1;
+
+ // `302 Moved Temporarily` code.
+ REDIRECT_HTTP_RESPONSE_CODE_302 = 2;
+
+ // `303 See Other` code.
+ REDIRECT_HTTP_RESPONSE_CODE_303 = 3;
+
+ // `307 Temporary Redirect` code.
+ REDIRECT_HTTP_RESPONSE_CODE_307 = 4;
+ }
+
+ // URL prefix. Uses regular expression syntax, which means regexp
+ // special characters must be escaped, but should not contain groupings.
+ // All URLs that begin with this prefix are handled by this handler, using the
+ // portion of the URL after the prefix as part of the file path.
+ string url_regex = 1;
+
+ // Type of handler for this URL pattern.
+ oneof handler_type {
+ // Returns the contents of a file, such as an image, as the response.
+ StaticFilesHandler static_files = 2;
+
+ // Executes a script to handle the request that matches this URL
+ // pattern.
+ ScriptHandler script = 3;
+
+ // Uses API Endpoints to handle requests.
+ ApiEndpointHandler api_endpoint = 4;
+ }
+
+ // Security (HTTPS) enforcement for this URL.
+ SecurityLevel security_level = 5;
+
+ // Level of login required to access this resource.
+ LoginRequirement login = 6;
+
+ // Action to take when users access resources that require
+ // authentication. Defaults to `redirect`.
+ AuthFailAction auth_fail_action = 7;
+
+ // `30x` code to use when performing redirects for the `secure` field.
+ // Defaults to `302`.
+ RedirectHttpResponseCode redirect_http_response_code = 8;
+}
+
+// Files served directly to the user for a given URL, such as images, CSS
+// stylesheets, or JavaScript source files. Static file handlers describe which
+// files in the application directory are static files, and which URLs serve
+// them.
+message StaticFilesHandler {
+ // Path to the static files matched by the URL pattern, from the
+ // application root directory. The path can refer to text matched in groupings
+ // in the URL pattern.
+ string path = 1;
+
+ // Regular expression that matches the file paths for all files that should be
+ // referenced by this handler.
+ string upload_path_regex = 2;
+
+ // HTTP headers to use for all responses from these URLs.
+ map<string, string> http_headers = 3;
+
+ // MIME type used to serve all files served by this handler.
+ //
+ // Defaults to file-specific MIME types, which are derived from each file's
+ // filename extension.
+ string mime_type = 4;
+
+ // Time a static file served by this handler should be cached
+ // by web proxies and browsers.
+ google.protobuf.Duration expiration = 5;
+
+ // Whether this handler should match the request if the file
+ // referenced by the handler does not exist.
+ bool require_matching_file = 6;
+
+ // Whether files should also be uploaded as code data. By default, files
+ // declared in static file handlers are uploaded as static
+ // data and are only served to end users; they cannot be read by the
+ // application. If enabled, uploads are charged against both your code and
+ // static data storage resource quotas.
+ bool application_readable = 7;
+}
+
+// Executes a script to handle the request that matches the URL pattern.
+message ScriptHandler {
+ // Path to the script from the application root directory.
+ string script_path = 1;
+}
+
+// Uses Google Cloud Endpoints to handle requests.
+message ApiEndpointHandler {
+ // Path to the script from the application root directory.
+ string script_path = 1;
+}
+
+// Health checking configuration for VM instances. Unhealthy instances
+// are killed and replaced with new instances. Only applicable for
+// instances in App Engine flexible environment.
+message HealthCheck {
+ // Whether to explicitly disable health checks for this instance.
+ bool disable_health_check = 1;
+
+ // Host header to send when performing an HTTP health check.
+ // Example: "myapp.appspot.com"
+ string host = 2;
+
+ // Number of consecutive successful health checks required before receiving
+ // traffic.
+ uint32 healthy_threshold = 3;
+
+ // Number of consecutive failed health checks required before removing
+ // traffic.
+ uint32 unhealthy_threshold = 4;
+
+ // Number of consecutive failed health checks required before an instance is
+ // restarted.
+ uint32 restart_threshold = 5;
+
+ // Interval between health checks.
+ google.protobuf.Duration check_interval = 6;
+
+ // Time before the health check is considered failed.
+ google.protobuf.Duration timeout = 7;
+}
+
+// Third-party Python runtime library that is required by the application.
+message Library {
+ // Name of the library. Example: "django".
+ string name = 1;
+
+ // Version of the library to select, or "latest".
+ string version = 2;
+}
+
+// Actions to take when the user is not logged in.
+enum AuthFailAction {
+ // Not specified. `AUTH_FAIL_ACTION_REDIRECT` is assumed.
+ AUTH_FAIL_ACTION_UNSPECIFIED = 0;
+
+ // Redirects the user to "accounts.google.com". The user is redirected back to the
+ // application URL after signing in or creating an account.
+ AUTH_FAIL_ACTION_REDIRECT = 1;
+
+ // Rejects request with a `401` HTTP status code and an error
+ // message.
+ AUTH_FAIL_ACTION_UNAUTHORIZED = 2;
+}
+
+// Methods to restrict access to a URL based on login status.
+enum LoginRequirement {
+ // Not specified. `LOGIN_OPTIONAL` is assumed.
+ LOGIN_UNSPECIFIED = 0;
+
+ // Does not require that the user is signed in.
+ LOGIN_OPTIONAL = 1;
+
+ // If the user is not signed in, the `auth_fail_action` is taken.
+ // In addition, if the user is not an administrator for the
+ // application, they are given an error message regardless of
+ // `auth_fail_action`. If the user is an administrator, the handler
+ // proceeds.
+ LOGIN_ADMIN = 2;
+
+ // If the user has signed in, the handler proceeds normally. Otherwise, the
+ // auth_fail_action is taken.
+ LOGIN_REQUIRED = 3;
+}
+
+// Methods to enforce security (HTTPS) on a URL.
+enum SecurityLevel {
+ option allow_alias = true;
+ // Not specified.
+ SECURE_UNSPECIFIED = 0;
+
+ // Both HTTP and HTTPS requests with URLs that match the handler succeed
+ // without redirects. The application can examine the request to determine
+ // which protocol was used, and respond accordingly.
+ SECURE_DEFAULT = 0;
+
+ // Requests for a URL that match this handler that use HTTPS are automatically
+ // redirected to the HTTP equivalent URL.
+ SECURE_NEVER = 1;
+
+ // Both HTTP and HTTPS requests with URLs that match the handler succeed
+ // without redirects. The application can examine the request to determine
+ // which protocol was used and respond accordingly.
+ SECURE_OPTIONAL = 2;
+
+ // Requests for a URL that match this handler that do not use HTTPS are
+ // automatically redirected to the HTTPS URL with the same path. Query
+ // parameters are preserved for the redirect.
+ SECURE_ALWAYS = 3;
+}
diff --git a/third_party/googleapis/google/appengine/v1/appengine.proto b/third_party/googleapis/google/appengine/v1/appengine.proto
new file mode 100644
index 0000000000..ef947a65b6
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/appengine.proto
@@ -0,0 +1,341 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+import "google/appengine/v1/application.proto";
+import "google/appengine/v1/instance.proto";
+import "google/appengine/v1/service.proto";
+import "google/appengine/v1/version.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "AppengineProto";
+option java_package = "com.google.appengine.v1";
+
+
+// Manages instances of a version.
+service Instances {
+ // Lists the instances of a version.
+ rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
+ option (google.api.http) = { get: "/v1/{parent=apps/*/services/*/versions/*}/instances" };
+ }
+
+ // Gets instance information.
+ rpc GetInstance(GetInstanceRequest) returns (Instance) {
+ option (google.api.http) = { get: "/v1/{name=apps/*/services/*/versions/*/instances/*}" };
+ }
+
+ // Stops a running instance.
+ rpc DeleteInstance(DeleteInstanceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1/{name=apps/*/services/*/versions/*/instances/*}" };
+ }
+
+ // Enables debugging on a VM instance. This allows you to use the SSH
+ // command to connect to the virtual machine where the instance lives.
+ // While in "debug mode", the instance continues to serve live traffic.
+ // You should delete the instance when you are done debugging and then
+ // allow the system to take over and determine if another instance
+ // should be started.
+ //
+ // Only applicable for instances in App Engine flexible environment.
+ rpc DebugInstance(DebugInstanceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/{name=apps/*/services/*/versions/*/instances/*}:debug" body: "*" };
+ }
+}
+
+// Manages versions of a service.
+service Versions {
+ // Lists the versions of a service.
+ rpc ListVersions(ListVersionsRequest) returns (ListVersionsResponse) {
+ option (google.api.http) = { get: "/v1/{parent=apps/*/services/*}/versions" };
+ }
+
+ // Gets the specified Version resource.
+ // By default, only a `BASIC_VIEW` will be returned.
+ // Specify the `FULL_VIEW` parameter to get the full resource.
+ rpc GetVersion(GetVersionRequest) returns (Version) {
+ option (google.api.http) = { get: "/v1/{name=apps/*/services/*/versions/*}" };
+ }
+
+ // Deploys code and resource files to a new version.
+ rpc CreateVersion(CreateVersionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/{parent=apps/*/services/*}/versions" body: "version" };
+ }
+
+ // Updates the specified Version resource.
+ // You can specify the following fields depending on the App Engine
+ // environment and type of scaling that the version resource uses:
+ //
+ // * [`serving_status`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status):
+ // For Version resources that use basic scaling, manual scaling, or run in
+ // the App Engine flexible environment.
+ // * [`instance_class`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class):
+ // For Version resources that run in the App Engine standard environment.
+ // * [`automatic_scaling.min_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling):
+ // For Version resources that use automatic scaling and run in the App
+ // Engine standard environment.
+ // * [`automatic_scaling.max_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling):
+ // For Version resources that use automatic scaling and run in the App
+ // Engine standard environment.
+ rpc UpdateVersion(UpdateVersionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { patch: "/v1/{name=apps/*/services/*/versions/*}" body: "version" };
+ }
+
+ // Deletes an existing Version resource.
+ rpc DeleteVersion(DeleteVersionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1/{name=apps/*/services/*/versions/*}" };
+ }
+}
+
+// Manages services of an application.
+service Services {
+ // Lists all the services in the application.
+ rpc ListServices(ListServicesRequest) returns (ListServicesResponse) {
+ option (google.api.http) = { get: "/v1/{parent=apps/*}/services" };
+ }
+
+ // Gets the current configuration of the specified service.
+ rpc GetService(GetServiceRequest) returns (Service) {
+ option (google.api.http) = { get: "/v1/{name=apps/*/services/*}" };
+ }
+
+ // Updates the configuration of the specified service.
+ rpc UpdateService(UpdateServiceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { patch: "/v1/{name=apps/*/services/*}" body: "service" };
+ }
+
+ // Deletes the specified service and all enclosed versions.
+ rpc DeleteService(DeleteServiceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1/{name=apps/*/services/*}" };
+ }
+}
+
+// Manages App Engine applications.
+service Applications {
+ // Gets information about an application.
+ rpc GetApplication(GetApplicationRequest) returns (Application) {
+ option (google.api.http) = { get: "/v1/{name=apps/*}" };
+ }
+
+ // Recreates the required App Engine features for the application in your
+ // project, for example a Cloud Storage bucket or App Engine service account.
+ // Use this method if you receive an error message about a missing feature,
+ // for example "*Error retrieving the App Engine service account*".
+ rpc RepairApplication(RepairApplicationRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/{name=apps/*}:repair" body: "*" };
+ }
+}
+
+// Request message for `Applications.GetApplication`.
+message GetApplicationRequest {
+ // Name of the Application resource to get. Example: `apps/myapp`.
+ string name = 1;
+}
+
+// Request message for `Applications.RepairApplication`.
+message RepairApplicationRequest {
+  // Name of the application to repair. Example: `apps/myapp`.
+ string name = 1;
+}
+
+// Request message for `Services.ListServices`.
+message ListServicesRequest {
+ // Name of the parent Application resource. Example: `apps/myapp`.
+ string parent = 1;
+
+ // Maximum results to return per page.
+ int32 page_size = 2;
+
+ // Continuation token for fetching the next page of results.
+ string page_token = 3;
+}
+
+// Response message for `Services.ListServices`.
+message ListServicesResponse {
+ // The services belonging to the requested application.
+ repeated Service services = 1;
+
+ // Continuation token for fetching the next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for `Services.GetService`.
+message GetServiceRequest {
+ // Name of the resource requested. Example: `apps/myapp/services/default`.
+ string name = 1;
+}
+
+// Request message for `Services.UpdateService`.
+message UpdateServiceRequest {
+ // Name of the resource to update. Example: `apps/myapp/services/default`.
+ string name = 1;
+
+ // A Service resource containing the updated service. Only fields set in the
+ // field mask will be updated.
+ Service service = 2;
+
+ // Standard field mask for the set of fields to be updated.
+ google.protobuf.FieldMask update_mask = 3;
+
+ // Set to `true` to gradually shift traffic from one version to another
+ // single version. By default, traffic is shifted immediately.
+ // For gradual traffic migration, the target version
+ // must be located within instances that are configured for both
+ // [warmup requests](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#inboundservicetype)
+ // and
+ // [automatic scaling](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#automaticscaling).
+ // You must specify the
+ // [`shardBy`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services#shardby)
+ // field in the Service resource. Gradual traffic migration is not
+ // supported in the App Engine flexible environment. For examples, see
+ // [Migrating and Splitting Traffic](https://cloud.google.com/appengine/docs/admin-api/migrating-splitting-traffic).
+ bool migrate_traffic = 4;
+}
+
+// Request message for `Services.DeleteService`.
+message DeleteServiceRequest {
+ // Name of the resource requested. Example: `apps/myapp/services/default`.
+ string name = 1;
+}
+
+// Request message for `Versions.ListVersions`.
+message ListVersionsRequest {
+ // Name of the parent Service resource. Example:
+ // `apps/myapp/services/default`.
+ string parent = 1;
+
+ // Controls the set of fields returned in the `List` response.
+ VersionView view = 2;
+
+ // Maximum results to return per page.
+ int32 page_size = 3;
+
+ // Continuation token for fetching the next page of results.
+ string page_token = 4;
+}
+
+// Response message for `Versions.ListVersions`.
+message ListVersionsResponse {
+ // The versions belonging to the requested service.
+ repeated Version versions = 1;
+
+ // Continuation token for fetching the next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for `Versions.GetVersion`.
+message GetVersionRequest {
+ // Name of the resource requested. Example:
+ // `apps/myapp/services/default/versions/v1`.
+ string name = 1;
+
+ // Controls the set of fields returned in the `Get` response.
+ VersionView view = 2;
+}
+
+// Request message for `Versions.CreateVersion`.
+message CreateVersionRequest {
+ // Name of the parent resource to create this version under. Example:
+ // `apps/myapp/services/default`.
+ string parent = 1;
+
+ // Application deployment configuration.
+ Version version = 2;
+}
+
+// Request message for `Versions.UpdateVersion`.
+message UpdateVersionRequest {
+ // Name of the resource to update. Example:
+ // `apps/myapp/services/default/versions/1`.
+ string name = 1;
+
+ // A Version containing the updated resource. Only fields set in the field
+ // mask will be updated.
+ Version version = 2;
+
+ // Standard field mask for the set of fields to be updated.
+ google.protobuf.FieldMask update_mask = 3;
+}
+
+// Request message for `Versions.DeleteVersion`.
+message DeleteVersionRequest {
+ // Name of the resource requested. Example:
+ // `apps/myapp/services/default/versions/v1`.
+ string name = 1;
+}
+
+// Request message for `Instances.ListInstances`.
+message ListInstancesRequest {
+ // Name of the parent Version resource. Example:
+ // `apps/myapp/services/default/versions/v1`.
+ string parent = 1;
+
+ // Maximum results to return per page.
+ int32 page_size = 2;
+
+ // Continuation token for fetching the next page of results.
+ string page_token = 3;
+}
+
+// Response message for `Instances.ListInstances`.
+message ListInstancesResponse {
+ // The instances belonging to the requested version.
+ repeated Instance instances = 1;
+
+ // Continuation token for fetching the next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for `Instances.GetInstance`.
+message GetInstanceRequest {
+ // Name of the resource requested. Example:
+ // `apps/myapp/services/default/versions/v1/instances/instance-1`.
+ string name = 1;
+}
+
+// Request message for `Instances.DeleteInstance`.
+message DeleteInstanceRequest {
+ // Name of the resource requested. Example:
+ // `apps/myapp/services/default/versions/v1/instances/instance-1`.
+ string name = 1;
+}
+
+// Request message for `Instances.DebugInstance`.
+message DebugInstanceRequest {
+ // Name of the resource requested. Example:
+ // `apps/myapp/services/default/versions/v1/instances/instance-1`.
+ string name = 1;
+}
+
+// Fields that should be returned when [Version][google.appengine.v1.Version] resources
+// are retrieved.
+enum VersionView {
+ // Basic version information including scaling and inbound services,
+ // but not detailed deployment information.
+ BASIC = 0;
+
+ // The information from `BASIC`, plus detailed information about the
+ // deployment. This format is required when creating resources, but
+ // is not returned in `Get` or `List` by default.
+ FULL = 1;
+}
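The comments on `UpdateServiceRequest` above describe gradual traffic migration: set `shardBy` on the Service, list only `split` in the field mask, and flip `migrate_traffic`. A minimal sketch of that call with protoc-generated Python stubs follows; the module names (`appengine_pb2`, `appengine_pb2_grpc`, `service_pb2`), the stub class, and the already-authenticated `channel` are assumptions of standard gRPC codegen, not something this proto prescribes.

    from google.protobuf import field_mask_pb2
    from google.appengine.v1 import appengine_pb2, appengine_pb2_grpc, service_pb2

    def migrate_traffic(channel, service_name, allocations):
        """Gradually shift traffic between versions; allocations must sum to 1."""
        services = appengine_pb2_grpc.ServicesStub(channel)
        request = appengine_pb2.UpdateServiceRequest(
            name=service_name,  # e.g. "apps/myapp/services/default"
            service=service_pb2.Service(split=service_pb2.TrafficSplit(
                shard_by=service_pb2.TrafficSplit.COOKIE,  # required for gradual migration
                allocations=allocations)),
            update_mask=field_mask_pb2.FieldMask(paths=["split"]),  # only update `split`
            migrate_traffic=True)  # shift gradually instead of all at once
        return services.UpdateService(request)  # a google.longrunning.Operation

For example, migrate_traffic(channel, "apps/myapp/services/default", {"v1": 0.5, "v2": 0.5}) splits traffic evenly between the two versions and lets App Engine ramp the change over, per the migrate_traffic comment.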
diff --git a/third_party/googleapis/google/appengine/v1/application.proto b/third_party/googleapis/google/appengine/v1/application.proto
new file mode 100644
index 0000000000..d962dda6bd
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/application.proto
@@ -0,0 +1,112 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "ApplicationProto";
+option java_package = "com.google.appengine.v1";
+
+
+// An Application resource contains the top-level configuration of an App
+// Engine application.
+message Application {
+ // Full path to the Application resource in the API.
+ // Example: `apps/myapp`.
+ //
+ // @OutputOnly
+ string name = 1;
+
+ // Identifier of the Application resource. This identifier is equivalent
+ // to the project ID of the Google Cloud Platform project where you want to
+ // deploy your application.
+ // Example: `myapp`.
+ string id = 2;
+
+ // HTTP path dispatch rules for requests to the application that do not
+ // explicitly target a service or version. Rules are order-dependent.
+ //
+ // @OutputOnly
+ repeated UrlDispatchRule dispatch_rules = 3;
+
+ // Google Apps authentication domain that controls which users can access
+ // this application.
+ //
+ // Defaults to open access for any Google Account.
+ string auth_domain = 6;
+
+ // Location from which this application will be run. Application instances
+ // will run out of data centers in the chosen location, which is also where
+ // all of the application's end user content is stored.
+ //
+ // Defaults to `us-central`.
+ //
+ // Options are:
+ //
+ // `us-central` - Central US
+ //
+ // `europe-west` - Western Europe
+ //
+ // `us-east1` - Eastern US
+ string location_id = 7;
+
+ // Google Cloud Storage bucket that can be used for storing files
+ // associated with this application. This bucket is associated with the
+ // application and can be used by the gcloud deployment commands.
+ //
+ // @OutputOnly
+ string code_bucket = 8;
+
+ // Cookie expiration policy for this application.
+ //
+ // @OutputOnly
+ google.protobuf.Duration default_cookie_expiration = 9;
+
+ // Hostname used to reach this application, as resolved by App Engine.
+ //
+ // @OutputOnly
+ string default_hostname = 11;
+
+ // Google Cloud Storage bucket that can be used by this application to store
+ // content.
+ //
+ // @OutputOnly
+ string default_bucket = 12;
+}
+
+// Rules to match an HTTP request and dispatch that request to a service.
+message UrlDispatchRule {
+ // Domain name to match against. The wildcard "`*`" is supported if
+ // specified before a period: "`*.`".
+ //
+ // Defaults to matching all domains: "`*`".
+ string domain = 1;
+
+ // Pathname within the host. Must start with a "`/`". A
+ // single "`*`" can be included at the end of the path. The sum
+ // of the lengths of the domain and path may not exceed 100
+ // characters.
+ string path = 2;
+
+ // Resource ID of a service in this application that should
+ // serve the matched request. The service must already
+ // exist. Example: `default`.
+ string service = 3;
+}
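The constraint documented on `UrlDispatchRule`, that the domain plus the path may not exceed 100 characters, is easy to check locally. A small sketch follows, assuming a protoc-generated `application_pb2` module; since `dispatch_rules` on Application is output-only, this is purely illustrative of the rule shape and limit.

    from google.appengine.v1 import application_pb2

    rules = [
        # Order matters: the first matching rule wins.
        application_pb2.UrlDispatchRule(domain="*.example.com", path="/api/*",
                                        service="api"),
        application_pb2.UrlDispatchRule(domain="*", path="/*", service="default"),
    ]
    for rule in rules:
        if len(rule.domain) + len(rule.path) > 100:
            raise ValueError("domain + path exceeds 100 characters: %s%s"
                             % (rule.domain, rule.path))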
diff --git a/third_party/googleapis/google/appengine/v1/deploy.proto b/third_party/googleapis/google/appengine/v1/deploy.proto
new file mode 100644
index 0000000000..77e6444fdd
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/deploy.proto
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "DeployProto";
+option java_package = "com.google.appengine.v1";
+
+
+// Code and application artifacts used to deploy a version to App Engine.
+message Deployment {
+ // Manifest of the files stored in Google Cloud Storage that are included
+ // as part of this version. All files must be readable using the
+ // credentials supplied with this call.
+ map<string, FileInfo> files = 1;
+
+ // A Docker image that App Engine uses to run the version.
+ // Only applicable for instances in App Engine flexible environment.
+ ContainerInfo container = 2;
+
+ // The zip file for this deployment, if this is a zip deployment.
+ ZipInfo zip = 3;
+}
+
+// Single source file that is part of the version to be deployed. Each source
+// file that is deployed must be specified separately.
+message FileInfo {
+ // URL source to use to fetch this file. Must be a URL to a resource in
+ // Google Cloud Storage in the form
+ // 'http(s)://storage.googleapis.com/\<bucket\>/\<object\>'.
+ string source_url = 1;
+
+ // The SHA1 hash of the file, in hex.
+ string sha1_sum = 2;
+
+ // The MIME type of the file.
+ //
+ // Defaults to the value from Google Cloud Storage.
+ string mime_type = 3;
+}
+
+// Docker image that is used to start a VM container for the version you
+// deploy.
+message ContainerInfo {
+ // URI to the hosted container image in a Docker repository. The URI must be
+ // fully qualified and include a tag or digest.
+ // Examples: "gcr.io/my-project/image:tag" or "gcr.io/my-project/image@digest"
+ string image = 1;
+}
+
+// The zip file information for a zip deployment.
+message ZipInfo {
+ // URL of the zip file to deploy from. Must be a URL to a resource in
+ // Google Cloud Storage in the form
+ // 'http(s)://storage.googleapis.com/\<bucket\>/\<object\>'.
+ string source_url = 3;
+
+ // An estimate of the number of files in a zip for a zip deployment.
+ // If set, must be greater than or equal to the actual number of files.
+ // Used for optimizing performance; if not provided, deployment may be slow.
+ int32 files_count = 4;
+}
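Deployment above admits two shapes: a per-file manifest of Cloud Storage objects with hex SHA1 checksums, or a single zip with an optional file-count hint. A sketch of both, assuming a protoc-generated `deploy_pb2` module; the bucket, object, and checksum values are placeholders.

    from google.appengine.v1 import deploy_pb2

    # File-manifest deployment: every source file listed individually, fetched
    # from Google Cloud Storage and verified against its hex SHA1.
    manifest = deploy_pb2.Deployment(
        files={
            "main.py": deploy_pb2.FileInfo(
                source_url="https://storage.googleapis.com/my-bucket/main.py",
                sha1_sum="2ef7bde608ce5404e97d5f042f95f89f1c232871",  # placeholder hex SHA1
                mime_type="text/x-python"),
        })

    # Zip deployment: a single archive, with an optional file-count hint that
    # must be >= the real number of files (it only affects deployment speed).
    zipped = deploy_pb2.Deployment(
        zip=deploy_pb2.ZipInfo(
            source_url="https://storage.googleapis.com/my-bucket/app.zip",
            files_count=3))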
diff --git a/third_party/googleapis/google/appengine/v1/instance.proto b/third_party/googleapis/google/appengine/v1/instance.proto
new file mode 100644
index 0000000000..206af26993
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/instance.proto
@@ -0,0 +1,121 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "InstanceProto";
+option java_package = "com.google.appengine.v1";
+
+
+// An Instance resource is the computing unit that App Engine uses to
+// automatically scale an application.
+message Instance {
+ // Availability of the instance.
+ enum Availability {
+ UNSPECIFIED = 0;
+
+ RESIDENT = 1;
+
+ DYNAMIC = 2;
+ }
+
+ // Full path to the Instance resource in the API.
+ // Example: `apps/myapp/services/default/versions/v1/instances/instance-1`.
+ //
+ // @OutputOnly
+ string name = 1;
+
+ // Relative name of the instance within the version.
+ // Example: `instance-1`.
+ //
+ // @OutputOnly
+ string id = 2;
+
+ // App Engine release this instance is running on.
+ //
+ // @OutputOnly
+ string app_engine_release = 3;
+
+ // Availability of the instance.
+ //
+ // @OutputOnly
+ Availability availability = 4;
+
+ // Name of the virtual machine where this instance lives. Only applicable
+ // for instances in App Engine flexible environment.
+ //
+ // @OutputOnly
+ string vm_name = 5;
+
+ // Zone where the virtual machine is located. Only applicable for instances
+ // in App Engine flexible environment.
+ //
+ // @OutputOnly
+ string vm_zone_name = 6;
+
+ // Virtual machine ID of this instance. Only applicable for instances in
+ // App Engine flexible environment.
+ //
+ // @OutputOnly
+ string vm_id = 7;
+
+ // Time that this instance was started.
+ //
+ // @OutputOnly
+ google.protobuf.Timestamp start_time = 8;
+
+ // Number of requests since this instance was started.
+ //
+ // @OutputOnly
+ int32 requests = 9;
+
+ // Number of errors since this instance was started.
+ //
+ // @OutputOnly
+ int32 errors = 10;
+
+ // Average queries per second (QPS) over the last minute.
+ //
+ // @OutputOnly
+ float qps = 11;
+
+ // Average latency (ms) over the last minute.
+ //
+ // @OutputOnly
+ int32 average_latency = 12;
+
+ // Total memory in use (bytes).
+ //
+ // @OutputOnly
+ int64 memory_usage = 13;
+
+ // Status of the virtual machine where this instance lives. Only applicable
+ // for instances in App Engine flexible environment.
+ //
+ // @OutputOnly
+ string vm_status = 14;
+
+ // Whether this instance is in debug mode. Only applicable for instances in
+ // App Engine flexible environment.
+ //
+ // @OutputOnly
+ bool vm_debug_enabled = 15;
+}
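All of the Instance fields above are output-only, so the usual flow is to page through `Instances.ListInstances` and read them. A sketch under the assumption of protoc-generated `appengine_pb2`, `appengine_pb2_grpc`, and `instance_pb2` modules and an already-authenticated channel:

    from google.appengine.v1 import appengine_pb2, appengine_pb2_grpc, instance_pb2

    def list_all_instances(channel, parent):
        """Print every instance of a version, e.g. parent="apps/myapp/services/default/versions/v1"."""
        stub = appengine_pb2_grpc.InstancesStub(channel)
        page_token = ""
        while True:
            response = stub.ListInstances(appengine_pb2.ListInstancesRequest(
                parent=parent, page_size=100, page_token=page_token))
            for inst in response.instances:
                availability = instance_pb2.Instance.Availability.Name(inst.availability)
                print(inst.id, availability, inst.qps, inst.average_latency)
            page_token = response.next_page_token
            if not page_token:  # an empty continuation token means no more pages
                break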
diff --git a/third_party/googleapis/google/appengine/v1/location.proto b/third_party/googleapis/google/appengine/v1/location.proto
new file mode 100644
index 0000000000..2a81fb912f
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/location.proto
@@ -0,0 +1,39 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+import "google/type/latlng.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "LocationProto";
+option java_package = "com.google.appengine.v1";
+
+
+// Metadata for the given [google.cloud.location.Location][google.cloud.location.Location].
+message LocationMetadata {
+ // App Engine Standard Environment is available in the given location.
+ //
+ // @OutputOnly
+ bool standard_environment_available = 2;
+
+ // App Engine Flexible Environment is available in the given location.
+ //
+ // @OutputOnly
+ bool flexible_environment_available = 4;
+}
diff --git a/third_party/googleapis/google/appengine/v1/operation.proto b/third_party/googleapis/google/appengine/v1/operation.proto
new file mode 100644
index 0000000000..ec79888927
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/operation.proto
@@ -0,0 +1,56 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "OperationProto";
+option java_package = "com.google.appengine.v1";
+
+
+// Metadata for the given [google.longrunning.Operation][google.longrunning.Operation].
+message OperationMetadataV1 {
+ // API method that initiated this operation. Example:
+ // `google.appengine.v1.Versions.CreateVersion`.
+ //
+ // @OutputOnly
+ string method = 1;
+
+ // Time that this operation was created.
+ //
+ // @OutputOnly
+ google.protobuf.Timestamp insert_time = 2;
+
+ // Time that this operation completed.
+ //
+ // @OutputOnly
+ google.protobuf.Timestamp end_time = 3;
+
+ // User who requested this operation.
+ //
+ // @OutputOnly
+ string user = 4;
+
+ // Name of the resource that this operation is acting on. Example:
+ // `apps/myapp/services/default`.
+ //
+ // @OutputOnly
+ string target = 5;
+}
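The Admin API surfaces this metadata inside the `metadata` field of the returned `google.longrunning.Operation`, which is a `google.protobuf.Any` and has to be unpacked into `OperationMetadataV1`. A sketch, assuming a protoc-generated `operation_pb2` module:

    from google.appengine.v1 import operation_pb2

    def describe(operation):
        """Print the App Engine metadata attached to a long-running operation."""
        metadata = operation_pb2.OperationMetadataV1()
        if operation.metadata.Unpack(metadata):  # False if the Any holds another type
            print("method:", metadata.method)
            print("target:", metadata.target)
            print("user:  ", metadata.user)
        return metadata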
diff --git a/third_party/googleapis/google/appengine/v1/service.proto b/third_party/googleapis/google/appengine/v1/service.proto
new file mode 100644
index 0000000000..2a24c47fd6
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/service.proto
@@ -0,0 +1,83 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "ServiceProto";
+option java_package = "com.google.appengine.v1";
+
+
+// A Service resource is a logical component of an application that can share
+// state and communicate in a secure fashion with other services.
+// For example, an application that handles customer requests might
+// include separate services to handle tasks such as backend data
+// analysis or API requests from mobile devices. Each service has a
+// collection of versions that define a specific set of code used to
+// implement the functionality of that service.
+message Service {
+ // Full path to the Service resource in the API.
+ // Example: `apps/myapp/services/default`.
+ //
+ // @OutputOnly
+ string name = 1;
+
+ // Relative name of the service within the application.
+ // Example: `default`.
+ //
+ // @OutputOnly
+ string id = 2;
+
+ // Mapping that defines fractional HTTP traffic diversion to
+ // different versions within the service.
+ TrafficSplit split = 3;
+}
+
+// Traffic routing configuration for versions within a single service. Traffic
+// splits define how traffic directed to the service is assigned to versions.
+message TrafficSplit {
+ // Available sharding mechanisms.
+ enum ShardBy {
+ // Diversion method unspecified.
+ UNSPECIFIED = 0;
+
+ // Diversion based on a specially named cookie, "GOOGAPPUID." The cookie
+ // must be set by the application itself or no diversion will occur.
+ COOKIE = 1;
+
+ // Diversion based on applying the modulus operation to a fingerprint
+ // of the IP address.
+ IP = 2;
+ }
+
+ // Mechanism used to determine which version a request is sent to.
+ // The traffic selection algorithm will
+ // be stable for either type until allocations are changed.
+ ShardBy shard_by = 1;
+
+ // Mapping from version IDs within the service to fractional
+ // (0.000, 1] allocations of traffic for that version. Each version can
+ // be specified only once, but some versions in the service may not
+ // have any traffic allocation. Services that have traffic allocated
+ // cannot be deleted until either the service is deleted or
+ // their traffic allocation is removed. Allocations must sum to 1.
+ // Up to two decimal place precision is supported for IP-based splits and
+ // up to three decimal places is supported for cookie-based splits.
+ map<string, double> allocations = 2;
+}
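The allocation rules above (each share in (0, 1], shares summing to 1, two decimal places for IP-based splits and three for cookie-based splits) are enforced by the service; the sketch below merely mirrors them client-side so a bad split fails fast. It assumes a protoc-generated `service_pb2` module.

    from google.appengine.v1 import service_pb2

    def check_split(split):
        """Mirror the documented TrafficSplit constraints before sending an update."""
        places = 3 if split.shard_by == service_pb2.TrafficSplit.COOKIE else 2
        total = 0.0
        for version_id, share in split.allocations.items():
            if not 0.0 < share <= 1.0:
                raise ValueError("allocation for %s must be in (0, 1]" % version_id)
            if round(share, places) != share:  # approximate check of decimal precision
                raise ValueError("allocation for %s has too many decimal places" % version_id)
            total += share
        if abs(total - 1.0) > 1e-9:
            raise ValueError("allocations must sum to 1, got %r" % total)

    check_split(service_pb2.TrafficSplit(
        shard_by=service_pb2.TrafficSplit.COOKIE,
        allocations={"v1": 0.7, "v2": 0.3}))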
diff --git a/third_party/googleapis/google/appengine/v1/version.proto b/third_party/googleapis/google/appengine/v1/version.proto
new file mode 100644
index 0000000000..b32e1ac5fb
--- /dev/null
+++ b/third_party/googleapis/google/appengine/v1/version.proto
@@ -0,0 +1,378 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.appengine.v1;
+
+import "google/api/annotations.proto";
+import "google/appengine/v1/app_yaml.proto";
+import "google/appengine/v1/deploy.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/appengine/v1;appengine";
+option java_multiple_files = true;
+option java_outer_classname = "VersionProto";
+option java_package = "com.google.appengine.v1";
+
+
+// A Version resource is a specific set of source code and configuration files
+// that are deployed into a service.
+message Version {
+ // Full path to the Version resource in the API. Example:
+ // `apps/myapp/services/default/versions/v1`.
+ //
+ // @OutputOnly
+ string name = 1;
+
+ // Relative name of the version within the service. Example: `v1`.
+ // Version names can contain only lowercase letters, numbers, or hyphens.
+ // Reserved names: "default", "latest", and any name with the prefix "ah-".
+ string id = 2;
+
+ // Controls how instances are created.
+ //
+ // Defaults to `AutomaticScaling`.
+ oneof scaling {
+ // Automatic scaling is based on request rate, response latencies, and other
+ // application metrics.
+ AutomaticScaling automatic_scaling = 3;
+
+ // A service with basic scaling will create an instance when the application
+ // receives a request. The instance will be turned down when the app becomes
+ // idle. Basic scaling is ideal for work that is intermittent or driven by
+ // user activity.
+ BasicScaling basic_scaling = 4;
+
+ // A service with manual scaling runs continuously, allowing you to perform
+ // complex initialization and rely on the state of its memory over time.
+ ManualScaling manual_scaling = 5;
+ }
+
+ // Before an application can receive email or XMPP messages, the application
+ // must be configured to enable the service.
+ repeated InboundServiceType inbound_services = 6;
+
+ // Instance class that is used to run this version. Valid values are:
+ // * AutomaticScaling: `F1`, `F2`, `F4`, `F4_1G`
+ // * ManualScaling or BasicScaling: `B1`, `B2`, `B4`, `B8`, `B4_1G`
+ //
+ // Defaults to `F1` for AutomaticScaling and `B1` for ManualScaling or
+ // BasicScaling.
+ string instance_class = 7;
+
+ // Extra network settings. Only applicable for VM runtimes.
+ Network network = 8;
+
+ // Machine resources for this version. Only applicable for VM runtimes.
+ Resources resources = 9;
+
+ // Desired runtime. Example: `python27`.
+ string runtime = 10;
+
+ // Whether multiple requests can be dispatched to this version at once.
+ bool threadsafe = 11;
+
+ // Whether to deploy this version in a container on a virtual machine.
+ bool vm = 12;
+
+ // Metadata settings that are supplied to this version to enable
+ // beta runtime features.
+ map<string, string> beta_settings = 13;
+
+ // App Engine execution environment for this version.
+ //
+ // Defaults to `standard`.
+ string env = 14;
+
+ // Current serving status of this version. Only the versions with a
+ // `SERVING` status create instances and can be billed.
+ //
+ // `SERVING_STATUS_UNSPECIFIED` is an invalid value. Defaults to `SERVING`.
+ ServingStatus serving_status = 15;
+
+ // Email address of the user who created this version.
+ //
+ // @OutputOnly
+ string created_by = 16;
+
+ // Time that this version was created.
+ //
+ // @OutputOnly
+ google.protobuf.Timestamp create_time = 17;
+
+ // Total size in bytes of all the files that are included in this version
+  // and currently hosted on the App Engine disk.
+ //
+ // @OutputOnly
+ int64 disk_usage_bytes = 18;
+
+ // An ordered list of URL-matching patterns that should be applied to incoming
+ // requests. The first matching URL handles the request and other request
+ // handlers are not attempted.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ repeated UrlMap handlers = 100;
+
+ // Custom static error pages. Limited to 10KB per page.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ repeated ErrorHandler error_handlers = 101;
+
+ // Configuration for third-party Python runtime libraries that are required
+ // by the application.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ repeated Library libraries = 102;
+
+ // Serving configuration for
+ // [Google Cloud Endpoints](https://cloud.google.com/appengine/docs/python/endpoints/).
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ ApiConfigHandler api_config = 103;
+
+ // Environment variables available to the application.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ map<string, string> env_variables = 104;
+
+ // Duration that static files should be cached by web proxies and browsers.
+ // Only applicable if the corresponding
+ // [StaticFilesHandler](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#staticfileshandler)
+ // does not specify its own expiration time.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ google.protobuf.Duration default_expiration = 105;
+
+ // Configures health checking for VM instances. Unhealthy instances are
+ // stopped and replaced with new instances. Only applicable for VM
+ // runtimes.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ HealthCheck health_check = 106;
+
+ // Files that match this pattern will not be built into this version.
+ // Only applicable for Go runtimes.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ string nobuild_files_regex = 107;
+
+ // Code and application artifacts that make up this version.
+ //
+ // Only returned in `GET` requests if `view=FULL` is set.
+ Deployment deployment = 108;
+
+ // Serving URL for this version. Example:
+ // "https://myversion-dot-myservice-dot-myapp.appspot.com"
+ //
+ // @OutputOnly
+ string version_url = 109;
+}
+
+// Automatic scaling is based on request rate, response latencies, and other
+// application metrics.
+message AutomaticScaling {
+ // Amount of time that the
+ // [Autoscaler](https://cloud.google.com/compute/docs/autoscaler/)
+ // should wait between changes to the number of virtual machines.
+ // Only applicable for VM runtimes.
+ google.protobuf.Duration cool_down_period = 1;
+
+ // Target scaling by CPU usage.
+ CpuUtilization cpu_utilization = 2;
+
+ // Number of concurrent requests an automatic scaling instance can accept
+ // before the scheduler spawns a new instance.
+ //
+ // Defaults to a runtime-specific value.
+ int32 max_concurrent_requests = 3;
+
+ // Maximum number of idle instances that should be maintained for this
+ // version.
+ int32 max_idle_instances = 4;
+
+ // Maximum number of instances that should be started to handle requests.
+ int32 max_total_instances = 5;
+
+ // Maximum amount of time that a request should wait in the pending queue
+ // before starting a new instance to handle it.
+ google.protobuf.Duration max_pending_latency = 6;
+
+ // Minimum number of idle instances that should be maintained for
+ // this version. Only applicable for the default version of a service.
+ int32 min_idle_instances = 7;
+
+ // Minimum number of instances that should be maintained for this version.
+ int32 min_total_instances = 8;
+
+ // Minimum amount of time a request should wait in the pending queue before
+ // starting a new instance to handle it.
+ google.protobuf.Duration min_pending_latency = 9;
+
+ // Target scaling by request utilization.
+ RequestUtilization request_utilization = 10;
+
+ // Target scaling by disk usage.
+ DiskUtilization disk_utilization = 11;
+
+ // Target scaling by network usage.
+ NetworkUtilization network_utilization = 12;
+}
+
+// A service with basic scaling will create an instance when the application
+// receives a request. The instance will be turned down when the app becomes
+// idle. Basic scaling is ideal for work that is intermittent or driven by
+// user activity.
+message BasicScaling {
+ // Duration of time after the last request that an instance must wait before
+ // the instance is shut down.
+ google.protobuf.Duration idle_timeout = 1;
+
+ // Maximum number of instances to create for this version.
+ int32 max_instances = 2;
+}
+
+// A service with manual scaling runs continuously, allowing you to perform
+// complex initialization and rely on the state of its memory over time.
+message ManualScaling {
+ // Number of instances to assign to the service at the start. This number
+ // can later be altered by using the
+ // [Modules API](https://cloud.google.com/appengine/docs/python/modules/functions)
+ // `set_num_instances()` function.
+ int32 instances = 1;
+}
+
+// Target scaling by CPU usage.
+message CpuUtilization {
+ // Period of time over which CPU utilization is calculated.
+ google.protobuf.Duration aggregation_window_length = 1;
+
+ // Target CPU utilization ratio to maintain when scaling. Must be between 0
+ // and 1.
+ double target_utilization = 2;
+}
+
+// Target scaling by request utilization. Only applicable for VM runtimes.
+message RequestUtilization {
+ // Target requests per second.
+ int32 target_request_count_per_second = 1;
+
+ // Target number of concurrent requests.
+ int32 target_concurrent_requests = 2;
+}
+
+// Target scaling by disk usage. Only applicable for VM runtimes.
+message DiskUtilization {
+ // Target bytes written per second.
+ int32 target_write_bytes_per_second = 14;
+
+ // Target ops written per second.
+ int32 target_write_ops_per_second = 15;
+
+ // Target bytes read per second.
+ int32 target_read_bytes_per_second = 16;
+
+  // Target ops read per second.
+ int32 target_read_ops_per_second = 17;
+}
+
+// Target scaling by network usage. Only applicable for VM runtimes.
+message NetworkUtilization {
+ // Target bytes sent per second.
+ int32 target_sent_bytes_per_second = 1;
+
+ // Target packets sent per second.
+ int32 target_sent_packets_per_second = 11;
+
+ // Target bytes received per second.
+ int32 target_received_bytes_per_second = 12;
+
+ // Target packets received per second.
+ int32 target_received_packets_per_second = 13;
+}
+
+// Extra network settings. Only applicable for VM runtimes.
+message Network {
+ // List of ports, or port pairs, to forward from the virtual machine to the
+ // application container.
+ repeated string forwarded_ports = 1;
+
+ // Tag to apply to the VM instance during creation.
+ string instance_tag = 2;
+
+ // Google Cloud Platform network where the virtual machines are created.
+ // Specify the short name, not the resource path.
+ //
+ // Defaults to `default`.
+ string name = 3;
+}
+
+// Machine resources for a version.
+message Resources {
+ // Number of CPU cores needed.
+ double cpu = 1;
+
+ // Disk size (GB) needed.
+ double disk_gb = 2;
+
+ // Memory (GB) needed.
+ double memory_gb = 3;
+}
+
+// Available inbound services.
+enum InboundServiceType {
+ // Not specified.
+ INBOUND_SERVICE_UNSPECIFIED = 0;
+
+ // Allows an application to receive mail.
+ INBOUND_SERVICE_MAIL = 1;
+
+  // Allows an application to receive email bounce notifications.
+ INBOUND_SERVICE_MAIL_BOUNCE = 2;
+
+ // Allows an application to receive error stanzas.
+ INBOUND_SERVICE_XMPP_ERROR = 3;
+
+ // Allows an application to receive instant messages.
+ INBOUND_SERVICE_XMPP_MESSAGE = 4;
+
+ // Allows an application to receive user subscription POSTs.
+ INBOUND_SERVICE_XMPP_SUBSCRIBE = 5;
+
+ // Allows an application to receive a user's chat presence.
+ INBOUND_SERVICE_XMPP_PRESENCE = 6;
+
+ // Registers an application for notifications when a client connects or
+ // disconnects from a channel.
+ INBOUND_SERVICE_CHANNEL_PRESENCE = 7;
+
+ // Enables warmup requests.
+ INBOUND_SERVICE_WARMUP = 9;
+}
+
+// Run states of a version.
+enum ServingStatus {
+ // Not specified.
+ SERVING_STATUS_UNSPECIFIED = 0;
+
+ // Currently serving. Instances are created according to the
+ // scaling settings of the version.
+ SERVING = 1;
+
+ // Disabled. No instances will be created and the scaling
+ // settings are ignored until the state of the version changes
+ // to `SERVING`.
+ STOPPED = 2;
+}
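A Version only needs a handful of these fields to be deployable through `Versions.CreateVersion`; everything else is either output-only or defaulted. A sketch of a minimal resource exercising the `scaling` oneof, assuming a protoc-generated `version_pb2` module and the stock `duration_pb2`; the id and runtime values are illustrative.

    from google.protobuf import duration_pb2
    from google.appengine.v1 import version_pb2

    version = version_pb2.Version(
        id="v2",  # lowercase letters, digits, hyphens; not "default", "latest", or "ah-*"
        runtime="python27",
        threadsafe=True,  # allow concurrent requests per instance
        inbound_services=[version_pb2.INBOUND_SERVICE_WARMUP],
        serving_status=version_pb2.SERVING,
        basic_scaling=version_pb2.BasicScaling(  # sets the "scaling" oneof
            idle_timeout=duration_pb2.Duration(seconds=300),
            max_instances=3))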
diff --git a/third_party/googleapis/google/assistant/embedded/README.md b/third_party/googleapis/google/assistant/embedded/README.md
new file mode 100644
index 0000000000..756d9ff3ec
--- /dev/null
+++ b/third_party/googleapis/google/assistant/embedded/README.md
@@ -0,0 +1,3 @@
+The `Google Assistant API` allows developers to embed the Google Assistant into
+their devices. It provides audio in (the user's spoken query) and audio out
+(the Assistant's spoken response).
diff --git a/third_party/googleapis/google/assistant/embedded/v1alpha1/embedded_assistant.proto b/third_party/googleapis/google/assistant/embedded/v1alpha1/embedded_assistant.proto
new file mode 100644
index 0000000000..4c42634c43
--- /dev/null
+++ b/third_party/googleapis/google/assistant/embedded/v1alpha1/embedded_assistant.proto
@@ -0,0 +1,281 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.assistant.embedded.v1alpha1;
+
+import "google/api/annotations.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1;embedded";
+option java_multiple_files = true;
+option java_outer_classname = "AssistantProto";
+option java_package = "com.google.assistant.embedded.v1alpha1";
+
+
+// Service that implements Google Assistant API.
+service EmbeddedAssistant {
+ // Initiates or continues a conversation with the embedded assistant service.
+ // Each call performs one round-trip, sending an audio request to the service
+ // and receiving the audio response. Uses bidirectional streaming to receive
+ // results, such as the `END_OF_UTTERANCE` event, while sending audio.
+ //
+ // A conversation is one or more gRPC connections, each consisting of several
+ // streamed requests and responses.
+ // For example, the user says *Add to my shopping list* and the assistant
+ // responds *What do you want to add?*. The sequence of streamed requests and
+ // responses in the first gRPC message could be:
+ //
+ // * ConverseRequest.config
+ // * ConverseRequest.audio_in
+ // * ConverseRequest.audio_in
+ // * ConverseRequest.audio_in
+ // * ConverseRequest.audio_in
+ // * ConverseResponse.event_type.END_OF_UTTERANCE
+ // * ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON
+ // * ConverseResponse.audio_out
+ // * ConverseResponse.audio_out
+ // * ConverseResponse.audio_out
+ //
+ // The user then says *bagels* and the assistant responds
+ // *OK, I've added bagels to your shopping list*. This is sent as another gRPC
+ // connection call to the `Converse` method, again with streamed requests and
+ // responses, such as:
+ //
+ // * ConverseRequest.config
+ // * ConverseRequest.audio_in
+ // * ConverseRequest.audio_in
+ // * ConverseRequest.audio_in
+ // * ConverseResponse.event_type.END_OF_UTTERANCE
+ // * ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE
+ // * ConverseResponse.audio_out
+ // * ConverseResponse.audio_out
+ // * ConverseResponse.audio_out
+ // * ConverseResponse.audio_out
+ //
+ // Although the precise order of responses is not guaranteed, sequential
+ // ConverseResponse.audio_out messages will always contain sequential portions
+ // of audio.
+ rpc Converse(stream ConverseRequest) returns (stream ConverseResponse);
+}
+
+// Specifies how to process the `ConverseRequest` messages.
+message ConverseConfig {
+ // *Required* Specifies how to process the subsequent incoming audio.
+ AudioInConfig audio_in_config = 1;
+
+ // *Required* Specifies how to format the audio that will be returned.
+ AudioOutConfig audio_out_config = 2;
+
+ // *Required* Represents the current dialog state.
+ ConverseState converse_state = 3;
+}
+
+// Specifies how to process the `audio_in` data that will be provided in
+// subsequent requests. For recommended settings, see the Google Assistant SDK
+// [best practices](https://developers.google.com/assistant/best-practices).
+message AudioInConfig {
+ // Audio encoding of the data sent in the audio message.
+ // Audio must be one-channel (mono). The only language supported is "en-US".
+ enum Encoding {
+ // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
+ ENCODING_UNSPECIFIED = 0;
+
+ // Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ // This encoding includes no header, only the raw audio bytes.
+ LINEAR16 = 1;
+
+ // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
+ // Codec) is the recommended encoding because it is
+ // lossless--therefore recognition is not compromised--and
+ // requires only about half the bandwidth of `LINEAR16`. This encoding
+ // includes the `FLAC` stream header followed by audio data. It supports
+ // 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are
+ // supported.
+ FLAC = 2;
+ }
+
+ // *Required* Encoding of audio data sent in all `audio_in` messages.
+ Encoding encoding = 1;
+
+ // *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in`
+ // messages. Valid values are from 16000-24000, but 16000 is optimal.
+ // For best results, set the sampling rate of the audio source to 16000 Hz.
+ // If that's not possible, use the native sample rate of the audio source
+ // (instead of re-sampling).
+ int32 sample_rate_hertz = 2;
+}
+
+// Specifies the desired format for the server to use when it returns
+// `audio_out` messages.
+message AudioOutConfig {
+ // Audio encoding of the data returned in the audio message. All encodings are
+ // raw audio bytes with no header, except as indicated below.
+ enum Encoding {
+ // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
+ ENCODING_UNSPECIFIED = 0;
+
+ // Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ LINEAR16 = 1;
+
+ // MP3 audio encoding. The sample rate is encoded in the payload.
+ MP3 = 2;
+
+ // Opus-encoded audio wrapped in an ogg container. The result will be a
+ // file which can be played natively on Android and in some browsers (such
+ // as Chrome). The quality of the encoding is considerably higher than MP3
+ // while using the same bitrate. The sample rate is encoded in the payload.
+ OPUS_IN_OGG = 3;
+ }
+
+ // *Required* The encoding of audio data to be returned in all `audio_out`
+ // messages.
+ Encoding encoding = 1;
+
+ // *Required* The sample rate in Hertz of the audio data returned in
+ // `audio_out` messages. Valid values are: 16000-24000.
+ int32 sample_rate_hertz = 2;
+
+ // *Required* Current volume setting of the device's audio output.
+ // Valid values are 1 to 100 (corresponding to 1% to 100%).
+ int32 volume_percentage = 3;
+}
+
+// Provides information about the current dialog state.
+message ConverseState {
+ // *Required* The `conversation_state` value returned in the prior
+ // `ConverseResponse`. Omit (do not set the field) if there was no prior
+ // `ConverseResponse`. If there was a prior `ConverseResponse`, do not omit
+ // this field; doing so will end that conversation (and this new request will
+ // start a new conversation).
+ bytes conversation_state = 1;
+}
+
+// The audio containing the assistant's response to the query. Sequential chunks
+// of audio data are received in sequential `ConverseResponse` messages.
+message AudioOut {
+ // *Output-only* The audio data containing the assistant's response to the
+ // query. Sequential chunks of audio data are received in sequential
+ // `ConverseResponse` messages.
+ bytes audio_data = 1;
+}
+
+// The semantic result for the user's spoken query.
+message ConverseResult {
+ // Possible states of the microphone after a `Converse` RPC completes.
+ enum MicrophoneMode {
+ // No mode specified.
+ MICROPHONE_MODE_UNSPECIFIED = 0;
+
+ // The service is not expecting a follow-on question from the user.
+ // The microphone should remain off until the user re-activates it.
+ CLOSE_MICROPHONE = 1;
+
+ // The service is expecting a follow-on question from the user. The
+ // microphone should be re-opened when the `AudioOut` playback completes
+ // (by starting a new `Converse` RPC call to send the new audio).
+ DIALOG_FOLLOW_ON = 2;
+ }
+
+ // *Output-only* The recognized transcript of what the user said.
+ string spoken_request_text = 1;
+
+ // *Output-only* The text of the assistant's spoken response. This is only
+ // returned for an IFTTT action.
+ string spoken_response_text = 2;
+
+ // *Output-only* State information for subsequent `ConverseRequest`. This
+ // value should be saved in the client and returned in the
+ // `conversation_state` with the next `ConverseRequest`. (The client does not
+ // need to interpret or otherwise use this value.) There is no need to save
+ // this information across device restarts.
+ bytes conversation_state = 3;
+
+ // *Output-only* Specifies the mode of the microphone after this `Converse`
+ // RPC is processed.
+ MicrophoneMode microphone_mode = 4;
+
+ // *Output-only* Updated volume level. The value will be 0 or omitted
+ // (indicating no change) unless a voice command such as "Increase the volume"
+ // or "Set volume level 4" was recognized, in which case the value will be
+ // between 1 and 100 (corresponding to the new volume level of 1% to 100%).
+ // Typically, a client should use this volume level when playing the
+ // `audio_out` data, and retain this value as the current volume level and
+ // supply it in the `AudioOutConfig` of the next `ConverseRequest`. (Some
+ // clients may also implement other ways to allow the current volume level to
+ // be changed, for example, by providing a knob that the user can turn.)
+ int32 volume_percentage = 5;
+}
+
+// The top-level message sent by the client. Clients must send at least two, and
+// typically numerous `ConverseRequest` messages. The first message must
+// contain a `config` message and must not contain `audio_in` data. All
+// subsequent messages must contain `audio_in` data and must not contain a
+// `config` message.
+message ConverseRequest {
+ // Exactly one of these fields must be specified in each `ConverseRequest`.
+ oneof converse_request {
+ // The `config` message provides information to the recognizer that
+ // specifies how to process the request.
+ // The first `ConverseRequest` message must contain a `config` message.
+ ConverseConfig config = 1;
+
+ // The audio data to be recognized. Sequential chunks of audio data are sent
+ // in sequential `ConverseRequest` messages. The first `ConverseRequest`
+ // message must not contain `audio_in` data and all subsequent
+ // `ConverseRequest` messages must contain `audio_in` data. The audio bytes
+ // must be encoded as specified in `AudioInConfig`.
+ // Audio must be sent at approximately real-time (16000 samples per second).
+ // An error will be returned if audio is sent significantly faster or
+ // slower.
+ bytes audio_in = 2;
+ }
+}
+
+// The top-level message received by the client. A series of one or more
+// `ConverseResponse` messages are streamed back to the client.
+message ConverseResponse {
+ // Indicates the type of event.
+ enum EventType {
+ // No event specified.
+ EVENT_TYPE_UNSPECIFIED = 0;
+
+ // This event indicates that the server has detected the end of the user's
+ // speech utterance and expects no additional speech. Therefore, the server
+ // will not process additional audio (although it may subsequently return
+ // additional results). The client should stop sending additional audio
+ // data, half-close the gRPC connection, and wait for any additional results
+ // until the server closes the gRPC connection.
+ END_OF_UTTERANCE = 1;
+ }
+
+ // Exactly one of these fields will be populated in each `ConverseResponse`.
+ oneof converse_response {
+ // *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that
+ // specifies the error for the operation.
+ // If an error occurs during processing, this message will be set and there
+ // will be no further messages sent.
+ google.rpc.Status error = 1;
+
+ // *Output-only* Indicates the type of event.
+ EventType event_type = 2;
+
+ // *Output-only* The audio containing the assistant's response to the query.
+ AudioOut audio_out = 3;
+
+ // *Output-only* The semantic result for the user's spoken query.
+ ConverseResult result = 5;
+ }
+}
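The comment on `Converse` above spells out the choreography: the first request carries only `config`, later requests carry `audio_in`, and the response stream interleaves `END_OF_UTTERANCE`, a `result` whose `conversation_state` must be echoed into the next call, and `audio_out` chunks. A sketch of one round trip with protoc-generated Python stubs follows; the module names, the audio source, and the channel setup are assumptions, and a real client would stop sending audio once it sees `END_OF_UTTERANCE` and let playback drive the next call.

    from google.assistant.embedded.v1alpha1 import embedded_assistant_pb2 as pb
    from google.assistant.embedded.v1alpha1 import embedded_assistant_pb2_grpc as pb_grpc

    def converse_once(channel, audio_chunks, prior_state=b""):
        """One Converse round trip; audio_chunks yields LINEAR16 16 kHz mono bytes."""
        def requests():
            # First request: config only, no audio_in.
            yield pb.ConverseRequest(config=pb.ConverseConfig(
                audio_in_config=pb.AudioInConfig(
                    encoding=pb.AudioInConfig.LINEAR16, sample_rate_hertz=16000),
                audio_out_config=pb.AudioOutConfig(
                    encoding=pb.AudioOutConfig.MP3, sample_rate_hertz=16000,
                    volume_percentage=50),
                converse_state=pb.ConverseState(conversation_state=prior_state)))
            # Subsequent requests: audio_in only, sent at roughly real time.
            for chunk in audio_chunks:
                yield pb.ConverseRequest(audio_in=chunk)

        stub = pb_grpc.EmbeddedAssistantStub(channel)
        next_state, audio_out = prior_state, b""
        for response in stub.Converse(requests()):
            if response.event_type == pb.ConverseResponse.END_OF_UTTERANCE:
                print("end of utterance; stop capturing microphone audio")
            if response.HasField("result"):
                next_state = response.result.conversation_state  # echo into next call
            if response.audio_out.audio_data:
                audio_out += response.audio_out.audio_data  # sequential audio chunks
        return next_state, audio_out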
diff --git a/third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml b/third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml
new file mode 100644
index 0000000000..cbf0447122
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml
@@ -0,0 +1,76 @@
+# Google Bigtable Admin API service configuration
+
+type: google.api.Service
+config_version: 3
+name: bigtableadmin.googleapis.com
+title: Cloud Bigtable Admin API
+
+apis:
+- name: google.bigtable.admin.v2.BigtableInstanceAdmin
+- name: google.bigtable.admin.v2.BigtableTableAdmin
+- name: google.longrunning.Operations
+
+# Additional types which are used as google.protobuf.Any values
+types:
+- name: google.bigtable.admin.v2.CreateInstanceMetadata
+- name: google.bigtable.admin.v2.UpdateClusterMetadata
+
+authentication:
+ rules:
+ # Unless explicitly weakened, all BigtableInstanceAdmin ops require cluster
+ # admin access.
+ - selector: google.bigtable.admin.v2.BigtableInstanceAdmin.*,
+ google.longrunning.Operations.*
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.cluster,
+ https://www.googleapis.com/auth/bigtable.admin.instance,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,
+ https://www.googleapis.com/auth/cloud-platform
+ # BigtableInstanceAdmin Ops which only require read access
+ - selector: google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster,
+ google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance,
+ google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters,
+ google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances,
+ google.longrunning.Operations.GetOperation,
+ google.longrunning.Operations.ListOperations
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.cluster,
+ https://www.googleapis.com/auth/bigtable.admin.instance,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-platform.read-only
+
+ # Unless explicitly weakened, all BigtableTableAdmin ops require table admin access
+ - selector: google.bigtable.admin.v2.BigtableTableAdmin.*
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-platform
+ # BigtableTableAdmin Ops which only require read access
+ - selector: google.bigtable.admin.v2.BigtableTableAdmin.GetTable,
+ google.bigtable.admin.v2.BigtableTableAdmin.ListTables
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-platform.read-only
+
+# Http override to expose Operations API at v2
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v2/{name=operations/**}'
+ - selector: google.longrunning.Operations.ListOperations
+ get: '/v2/{name=operations}'
+ - selector: google.longrunning.Operations.CancelOperation
+ post: '/v2/{name=operations/**}:cancel'
+ - selector: google.longrunning.Operations.DeleteOperation
+ delete: '/v2/{name=operations/**}'
diff --git a/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto
new file mode 100644
index 0000000000..40072416cd
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto
@@ -0,0 +1,126 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.table.v1;
+
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableDataProto";
+option java_package = "com.google.bigtable.admin.table.v1";
+
+
+// A collection of user data indexed by row, column, and timestamp.
+// Each table is served using the resources of its parent cluster.
+message Table {
+ enum TimestampGranularity {
+ MILLIS = 0;
+ }
+
+ // A unique identifier of the form
+ // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
+ string name = 1;
+
+ // If this Table is in the process of being created, the Operation used to
+ // track its progress. As long as this operation is present, the Table will
+ // not accept any Table Admin or Read/Write requests.
+ google.longrunning.Operation current_operation = 2;
+
+ // The column families configured for this table, mapped by column family id.
+ map<string, ColumnFamily> column_families = 3;
+
+ // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
+ // this table. Timestamps not matching the granularity will be rejected.
+ // Cannot be changed once the table is created.
+ TimestampGranularity granularity = 4;
+}
+
+// A set of columns within a table which share a common configuration.
+message ColumnFamily {
+ // A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+
+ // The last segment is the same as the "name" field in
+ // google.bigtable.v1.Family.
+ string name = 1;
+
+ // Garbage collection expression specified by the following grammar:
+ // GC = EXPR
+ // | "" ;
+ // EXPR = EXPR, "||", EXPR (* lowest precedence *)
+ // | EXPR, "&&", EXPR
+ // | "(", EXPR, ")" (* highest precedence *)
+ // | PROP ;
+ // PROP = "version() >", NUM32
+ // | "age() >", NUM64, [ UNIT ] ;
+ // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *)
+ // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *)
+ // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *)
+ // GC expressions can be up to 500 characters in length
+ //
+ // The different types of PROP are defined as follows:
+ // version() - cell index, counting from most recent and starting at 1
+ // age() - age of the cell (current time minus cell timestamp)
+ //
+ // Example: "version() > 3 || (age() > 3d && version() > 1)"
+ // drop cells beyond the most recent three, and drop cells older than three
+ // days unless they're the most recent cell in the row/column
+ //
+ // Garbage collection executes opportunistically in the background, and so
+ // it's possible for reads to return a cell even if it matches the active GC
+ // expression for its family.
+ string gc_expression = 2;
+
+ // Garbage collection rule specified as a protobuf.
+ // Supersedes `gc_expression`.
+ // Must serialize to at most 500 bytes.
+ //
+ // NOTE: Garbage collection executes opportunistically in the background, and
+ // so it's possible for reads to return a cell even if it matches the active
+ // GC expression for its family.
+ GcRule gc_rule = 3;
+}
+
+// Rule for determining which cells to delete during garbage collection.
+message GcRule {
+ // A GcRule which deletes cells matching all of the given rules.
+ message Intersection {
+ // Only delete cells which would be deleted by every element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ // A GcRule which deletes cells matching any of the given rules.
+ message Union {
+ // Delete cells which would be deleted by any element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ oneof rule {
+ // Delete all cells in a column except the most recent N.
+ int32 max_num_versions = 1;
+
+ // Delete cells in a column older than the given age.
+ // Values must be at least one millisecond, and will be truncated to
+ // microsecond granularity.
+ google.protobuf.Duration max_age = 2;
+
+ // Delete cells that would be deleted by every nested rule.
+ Intersection intersection = 3;
+
+ // Delete cells that would be deleted by any nested rule.
+ Union union = 4;
+ }
+}
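The GcRule message above is the structured counterpart of the gc_expression grammar documented in ColumnFamily. A minimal sketch, assuming the classes a java_proto_library rule would generate for this file (java_package com.google.bigtable.admin.table.v1, java_multiple_files), builds the rule equivalent to the documented example "version() > 3 || (age() > 3d && version() > 1)":

    import com.google.bigtable.admin.table.v1.GcRule;
    import com.google.protobuf.Duration;

    public final class GcRuleSketch {
      public static void main(String[] args) {
        // age() > 3d; values are truncated to microsecond granularity server-side.
        GcRule olderThanThreeDays =
            GcRule.newBuilder()
                .setMaxAge(Duration.newBuilder().setSeconds(3L * 24 * 60 * 60))
                .build();

        // version() > 1: drop everything except the most recent cell.
        GcRule allButMostRecent = GcRule.newBuilder().setMaxNumVersions(1).build();

        // age() > 3d && version() > 1
        GcRule intersection =
            GcRule.newBuilder()
                .setIntersection(
                    GcRule.Intersection.newBuilder()
                        .addRules(olderThanThreeDays)
                        .addRules(allButMostRecent))
                .build();

        // version() > 3 || (age() > 3d && version() > 1)
        GcRule rule =
            GcRule.newBuilder()
                .setUnion(
                    GcRule.Union.newBuilder()
                        .addRules(GcRule.newBuilder().setMaxNumVersions(3))
                        .addRules(intersection))
                .build();

        System.out.println(rule);
      }
    }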
diff --git a/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto
new file mode 100644
index 0000000000..6962862776
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto
@@ -0,0 +1,80 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.table.v1;
+
+import "google/api/annotations.proto";
+import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
+import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableServicesProto";
+option java_package = "com.google.bigtable.admin.table.v1";
+
+
+// Service for creating, configuring, and deleting Cloud Bigtable tables.
+// Provides access to the table schemas only, not the data stored within the tables.
+service BigtableTableService {
+ // Creates a new table, to be served from a specified cluster.
+ // The table can be created with a full set of initial column families,
+ // specified in the request.
+ rpc CreateTable(CreateTableRequest) returns (Table) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" };
+ }
+
+ // Lists the names of all tables served from a specified cluster.
+ rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" };
+ }
+
+ // Gets the schema of the specified table, including its column families.
+ rpc GetTable(GetTableRequest) returns (Table) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
+ }
+
+ // Permanently deletes a specified table and all of its data.
+ rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
+ }
+
+ // Changes the name of a specified table.
+ // Cannot be used to move tables between clusters, zones, or projects.
+ rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" };
+ }
+
+ // Creates a new column family within a specified table.
+ rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" };
+ }
+
+ // Changes the configuration of a specified column family.
+ rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) {
+ option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" };
+ }
+
+ // Permanently deletes a specified column family and all of its data.
+ rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" };
+ }
+
+  // Deletes all rows in a table corresponding to a particular prefix.
+ rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" };
+ }
+}
diff --git a/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto
new file mode 100644
index 0000000000..7374dc9d8b
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto
@@ -0,0 +1,116 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.table.v1;
+
+import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableServiceMessagesProto";
+option java_package = "com.google.bigtable.admin.table.v1";
+
+
+message CreateTableRequest {
+ // The unique name of the cluster in which to create the new table.
+ string name = 1;
+
+ // The name by which the new table should be referred to within the cluster,
+ // e.g. "foobar" rather than "<cluster_name>/tables/foobar".
+ string table_id = 2;
+
+ // The Table to create. The `name` field of the Table and all of its
+ // ColumnFamilies must be left blank, and will be populated in the response.
+ Table table = 3;
+
+ // The optional list of row keys that will be used to initially split the
+ // table into several tablets (Tablets are similar to HBase regions).
+ // Given two split keys, "s1" and "s2", three tablets will be created,
+ // spanning the key ranges: [, s1), [s1, s2), [s2, ).
+ //
+ // Example:
+ // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2",
+ // "other", "zz"]
+ // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"]
+ // * Key assignment:
+ // - Tablet 1 [, apple) => {"a"}.
+ // - Tablet 2 [apple, customer_1) => {"apple", "custom"}.
+ // - Tablet 3 [customer_1, customer_2) => {"customer_1"}.
+ // - Tablet 4 [customer_2, other) => {"customer_2"}.
+ // - Tablet 5 [other, ) => {"other", "zz"}.
+ repeated string initial_split_keys = 4;
+}
+
+message ListTablesRequest {
+ // The unique name of the cluster for which tables should be listed.
+ string name = 1;
+}
+
+message ListTablesResponse {
+ // The tables present in the requested cluster.
+ // At present, only the names of the tables are populated.
+ repeated Table tables = 1;
+}
+
+message GetTableRequest {
+ // The unique name of the requested table.
+ string name = 1;
+}
+
+message DeleteTableRequest {
+ // The unique name of the table to be deleted.
+ string name = 1;
+}
+
+message RenameTableRequest {
+ // The current unique name of the table.
+ string name = 1;
+
+ // The new name by which the table should be referred to within its containing
+ // cluster, e.g. "foobar" rather than "<cluster_name>/tables/foobar".
+ string new_id = 2;
+}
+
+message CreateColumnFamilyRequest {
+ // The unique name of the table in which to create the new column family.
+ string name = 1;
+
+ // The name by which the new column family should be referred to within the
+ // table, e.g. "foobar" rather than "<table_name>/columnFamilies/foobar".
+ string column_family_id = 2;
+
+ // The column family to create. The `name` field must be left blank.
+ ColumnFamily column_family = 3;
+}
+
+message DeleteColumnFamilyRequest {
+ // The unique name of the column family to be deleted.
+ string name = 1;
+}
+
+message BulkDeleteRowsRequest {
+  // The unique name of the table on which to perform the bulk delete.
+ string table_name = 1;
+
+ oneof target {
+ // Delete all rows that start with this row key prefix. Prefix cannot be
+ // zero length.
+ bytes row_key_prefix = 2;
+
+ // Delete all rows in the table. Setting this to false is a no-op.
+ bool delete_all_data_from_table = 3;
+ }
+}
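Putting the messages above together, a CreateTableRequest mirroring the initial_split_keys example in the comment could be assembled as follows. This is a sketch only; the project, zone, cluster, and table names are placeholders, and the classes are assumed to come from a java_proto_library rule for this file:

    import com.google.bigtable.admin.table.v1.CreateTableRequest;
    import com.google.bigtable.admin.table.v1.Table;

    public final class CreateTableRequestSketch {
      public static void main(String[] args) {
        CreateTableRequest request =
            CreateTableRequest.newBuilder()
                .setName("projects/my-project/zones/us-central1-b/clusters/my-cluster")
                .setTableId("customers")
                // `name` and ColumnFamily names must be left blank in the Table.
                .setTable(Table.getDefaultInstance())
                .addInitialSplitKeys("apple")
                .addInitialSplitKeys("customer_1")
                .addInitialSplitKeys("customer_2")
                .addInitialSplitKeys("other")
                .build();

        System.out.println(request);
      }
    }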
diff --git a/third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto b/third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto
new file mode 100644
index 0000000000..a4883bfcfa
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto
@@ -0,0 +1,233 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/bigtable/admin/v2/instance.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableInstanceAdminProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// Service for creating, configuring, and deleting Cloud Bigtable Instances and
+// Clusters. Provides access to the Instance and Cluster schemas only, not the
+// tables' metadata or data stored in those tables.
+service BigtableInstanceAdmin {
+ // Create an instance within a project.
+ rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" };
+ }
+
+ // Gets information about an instance.
+ rpc GetInstance(GetInstanceRequest) returns (Instance) {
+ option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" };
+ }
+
+ // Lists information about instances in a project.
+ rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" };
+ }
+
+ // Updates an instance within a project.
+ rpc UpdateInstance(Instance) returns (Instance) {
+ option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" };
+ }
+
+ // Delete an instance from a project.
+ rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" };
+ }
+
+ // Creates a cluster within an instance.
+ rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" };
+ }
+
+ // Gets information about a cluster.
+ rpc GetCluster(GetClusterRequest) returns (Cluster) {
+ option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" };
+ }
+
+ // Lists information about clusters in an instance.
+ rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" };
+ }
+
+ // Updates a cluster within an instance.
+ rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) {
+ option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" };
+ }
+
+ // Deletes a cluster from an instance.
+ rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" };
+ }
+}
+
+// Request message for BigtableInstanceAdmin.CreateInstance.
+message CreateInstanceRequest {
+ // The unique name of the project in which to create the new instance.
+ // Values are of the form `projects/<project>`.
+ string parent = 1;
+
+ // The ID to be used when referring to the new instance within its project,
+ // e.g., just `myinstance` rather than
+ // `projects/myproject/instances/myinstance`.
+ string instance_id = 2;
+
+ // The instance to create.
+ // Fields marked `OutputOnly` must be left blank.
+ Instance instance = 3;
+
+ // The clusters to be created within the instance, mapped by desired
+ // cluster ID, e.g., just `mycluster` rather than
+ // `projects/myproject/instances/myinstance/clusters/mycluster`.
+ // Fields marked `OutputOnly` must be left blank.
+ // Currently exactly one cluster must be specified.
+ map<string, Cluster> clusters = 4;
+}
+
+// Request message for BigtableInstanceAdmin.GetInstance.
+message GetInstanceRequest {
+ // The unique name of the requested instance. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string name = 1;
+}
+
+// Request message for BigtableInstanceAdmin.ListInstances.
+message ListInstancesRequest {
+ // The unique name of the project for which a list of instances is requested.
+ // Values are of the form `projects/<project>`.
+ string parent = 1;
+
+ // The value of `next_page_token` returned by a previous call.
+ string page_token = 2;
+}
+
+// Response message for BigtableInstanceAdmin.ListInstances.
+message ListInstancesResponse {
+ // The list of requested instances.
+ repeated Instance instances = 1;
+
+ // Locations from which Instance information could not be retrieved,
+ // due to an outage or some other transient condition.
+ // Instances whose Clusters are all in one of the failed locations
+ // may be missing from `instances`, and Instances with at least one
+ // Cluster in a failed location may only have partial information returned.
+ repeated string failed_locations = 2;
+
+ // Set if not all instances could be returned in a single response.
+ // Pass this value to `page_token` in another request to get the next
+ // page of results.
+ string next_page_token = 3;
+}
+
+// Request message for BigtableInstanceAdmin.DeleteInstance.
+message DeleteInstanceRequest {
+ // The unique name of the instance to be deleted.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string name = 1;
+}
+
+// Request message for BigtableInstanceAdmin.CreateCluster.
+message CreateClusterRequest {
+ // The unique name of the instance in which to create the new cluster.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // The ID to be used when referring to the new cluster within its instance,
+ // e.g., just `mycluster` rather than
+ // `projects/myproject/instances/myinstance/clusters/mycluster`.
+ string cluster_id = 2;
+
+ // The cluster to be created.
+ // Fields marked `OutputOnly` must be left blank.
+ Cluster cluster = 3;
+}
+
+// Request message for BigtableInstanceAdmin.GetCluster.
+message GetClusterRequest {
+ // The unique name of the requested cluster. Values are of the form
+ // `projects/<project>/instances/<instance>/clusters/<cluster>`.
+ string name = 1;
+}
+
+// Request message for BigtableInstanceAdmin.ListClusters.
+message ListClustersRequest {
+ // The unique name of the instance for which a list of clusters is requested.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ // Use `<instance> = '-'` to list Clusters for all Instances in a project,
+ // e.g., `projects/myproject/instances/-`.
+ string parent = 1;
+
+ // The value of `next_page_token` returned by a previous call.
+ string page_token = 2;
+}
+
+// Response message for BigtableInstanceAdmin.ListClusters.
+message ListClustersResponse {
+ // The list of requested clusters.
+ repeated Cluster clusters = 1;
+
+ // Locations from which Cluster information could not be retrieved,
+ // due to an outage or some other transient condition.
+ // Clusters from these locations may be missing from `clusters`,
+ // or may only have partial information returned.
+ repeated string failed_locations = 2;
+
+ // Set if not all clusters could be returned in a single response.
+ // Pass this value to `page_token` in another request to get the next
+ // page of results.
+ string next_page_token = 3;
+}
+
+// Request message for BigtableInstanceAdmin.DeleteCluster.
+message DeleteClusterRequest {
+ // The unique name of the cluster to be deleted. Values are of the form
+ // `projects/<project>/instances/<instance>/clusters/<cluster>`.
+ string name = 1;
+}
+
+// The metadata for the Operation returned by CreateInstance.
+message CreateInstanceMetadata {
+ // The request that prompted the initiation of this CreateInstance operation.
+ CreateInstanceRequest original_request = 1;
+
+ // The time at which the original request was received.
+ google.protobuf.Timestamp request_time = 2;
+
+ // The time at which the operation failed or was completed successfully.
+ google.protobuf.Timestamp finish_time = 3;
+}
+
+// The metadata for the Operation returned by UpdateCluster.
+message UpdateClusterMetadata {
+ // The request that prompted the initiation of this UpdateCluster operation.
+ Cluster original_request = 1;
+
+ // The time at which the original request was received.
+ google.protobuf.Timestamp request_time = 2;
+
+ // The time at which the operation failed or was completed successfully.
+ google.protobuf.Timestamp finish_time = 3;
+}
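As a sketch of the v2 instance admin request shapes, the following builds a CreateInstanceRequest with the single required cluster, using placeholder project, instance, and cluster IDs and the classes a java_proto_library rule would generate for these files (java_package com.google.bigtable.admin.v2):

    import com.google.bigtable.admin.v2.Cluster;
    import com.google.bigtable.admin.v2.CreateInstanceRequest;
    import com.google.bigtable.admin.v2.Instance;
    import com.google.bigtable.admin.v2.StorageType;

    public final class CreateInstanceRequestSketch {
      public static void main(String[] args) {
        Cluster cluster =
            Cluster.newBuilder()
                // `name` is OutputOnly and therefore left blank.
                .setLocation("projects/my-project/locations/us-central1-b")
                .setServeNodes(3)
                .setDefaultStorageType(StorageType.SSD)
                .build();

        CreateInstanceRequest request =
            CreateInstanceRequest.newBuilder()
                .setParent("projects/my-project")
                .setInstanceId("myinstance")
                .setInstance(
                    Instance.newBuilder()
                        .setDisplayName("My production instance")
                        .setType(Instance.Type.PRODUCTION))
                // Exactly one cluster, keyed by its desired cluster ID.
                .putClusters("mycluster", cluster)
                .build();

        System.out.println(request);
      }
    }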
diff --git a/third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto b/third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto
new file mode 100644
index 0000000000..a8500cd439
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto
@@ -0,0 +1,214 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/bigtable/admin/v2/table.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableAdminProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// Service for creating, configuring, and deleting Cloud Bigtable tables.
+// Provides access to the table schemas only, not the data stored within
+// the tables.
+service BigtableTableAdmin {
+ // Creates a new table in the specified instance.
+ // The table can be created with a full set of initial column families,
+ // specified in the request.
+ rpc CreateTable(CreateTableRequest) returns (Table) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" };
+ }
+
+ // Lists all tables served from a specified instance.
+ rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" };
+ }
+
+ // Gets metadata information about the specified table.
+ rpc GetTable(GetTableRequest) returns (Table) {
+ option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" };
+ }
+
+ // Permanently deletes a specified table and all of its data.
+ rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" };
+ }
+
+ // Atomically performs a series of column family modifications
+ // on the specified table.
+ rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) {
+ option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" };
+ }
+
+ // Permanently drop/delete a row range from a specified table. The request can
+ // specify whether to delete all rows in a table, or only those that match a
+ // particular prefix.
+ rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" };
+ }
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
+message CreateTableRequest {
+ // An initial split point for a newly created table.
+ message Split {
+ // Row key to use as an initial tablet boundary.
+ bytes key = 1;
+ }
+
+ // The unique name of the instance in which to create the table.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // The name by which the new table should be referred to within the parent
+ // instance, e.g., `foobar` rather than `<parent>/tables/foobar`.
+ string table_id = 2;
+
+ // The Table to create.
+ Table table = 3;
+
+ // The optional list of row keys that will be used to initially split the
+ // table into several tablets (tablets are similar to HBase regions).
+ // Given two split keys, `s1` and `s2`, three tablets will be created,
+ // spanning the key ranges: `[, s1), [s1, s2), [s2, )`.
+ //
+ // Example:
+ //
+ // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",`
+ // `"other", "zz"]`
+ // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]`
+ // * Key assignment:
+ // - Tablet 1 `[, apple) => {"a"}.`
+ // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.`
+ // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.`
+ // - Tablet 4 `[customer_2, other) => {"customer_2"}.`
+ // - Tablet 5 `[other, ) => {"other", "zz"}.`
+ repeated Split initial_splits = 4;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
+message DropRowRangeRequest {
+ // The unique name of the table on which to drop a range of rows.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+
+  // Whether to delete all rows, or only those matching a given row key prefix.
+ oneof target {
+ // Delete all rows that start with this row key prefix. Prefix cannot be
+ // zero length.
+ bytes row_key_prefix = 2;
+
+ // Delete all rows in the table. Setting this to false is a no-op.
+ bool delete_all_data_from_table = 3;
+ }
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
+message ListTablesRequest {
+ // The unique name of the instance for which tables should be listed.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // The view to be applied to the returned tables' fields.
+ // Defaults to `NAME_ONLY` if unspecified; no others are currently supported.
+ Table.View view = 2;
+
+ // The value of `next_page_token` returned by a previous call.
+ string page_token = 3;
+}
+
+// Response message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
+message ListTablesResponse {
+ // The tables present in the requested instance.
+ repeated Table tables = 1;
+
+ // Set if not all tables could be returned in a single response.
+ // Pass this value to `page_token` in another request to get the next
+ // page of results.
+ string next_page_token = 2;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable]
+message GetTableRequest {
+ // The unique name of the requested table.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+
+ // The view to be applied to the returned table's fields.
+ // Defaults to `SCHEMA_ONLY` if unspecified.
+ Table.View view = 2;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable]
+message DeleteTableRequest {
+ // The unique name of the table to be deleted.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies]
+message ModifyColumnFamiliesRequest {
+ // A create, update, or delete of a particular column family.
+ message Modification {
+ // The ID of the column family to be modified.
+ string id = 1;
+
+    // Column family modifications.
+ oneof mod {
+ // Create a new column family with the specified schema, or fail if
+ // one already exists with the given ID.
+ ColumnFamily create = 2;
+
+ // Update an existing column family to the specified schema, or fail
+ // if no column family exists with the given ID.
+ ColumnFamily update = 3;
+
+ // Drop (delete) the column family with the given ID, or fail if no such
+ // family exists.
+ bool drop = 4;
+ }
+ }
+
+ // The unique name of the table whose families should be modified.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+
+ // Modifications to be atomically applied to the specified table's families.
+ // Entries are applied in order, meaning that earlier modifications can be
+ // masked by later ones (in the case of repeated updates to the same family,
+ // for example).
+ repeated Modification modifications = 2;
+}
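The ModifyColumnFamilies RPC applies its modifications atomically and in order. A minimal sketch, again assuming generated com.google.bigtable.admin.v2 classes and placeholder resource names, creates one family and drops another in a single request:

    import com.google.bigtable.admin.v2.ColumnFamily;
    import com.google.bigtable.admin.v2.GcRule;
    import com.google.bigtable.admin.v2.ModifyColumnFamiliesRequest;
    import com.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification;

    public final class ModifyColumnFamiliesSketch {
      public static void main(String[] args) {
        ModifyColumnFamiliesRequest request =
            ModifyColumnFamiliesRequest.newBuilder()
                .setName("projects/my-project/instances/myinstance/tables/customers")
                // Create "cf1", keeping at most two versions per column.
                .addModifications(
                    Modification.newBuilder()
                        .setId("cf1")
                        .setCreate(
                            ColumnFamily.newBuilder()
                                .setGcRule(GcRule.newBuilder().setMaxNumVersions(2))))
                // Drop "cf_old" and all of its data.
                .addModifications(
                    Modification.newBuilder().setId("cf_old").setDrop(true))
                .build();

        System.out.println(request);
      }
    }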
diff --git a/third_party/googleapis/google/bigtable/admin/v2/common.proto b/third_party/googleapis/google/bigtable/admin/v2/common.proto
new file mode 100644
index 0000000000..c6e2bb223b
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/common.proto
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "CommonProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// Storage media types for persisting Bigtable data.
+enum StorageType {
+ // The user did not specify a storage type.
+ STORAGE_TYPE_UNSPECIFIED = 0;
+
+ // Flash (SSD) storage should be used.
+ SSD = 1;
+
+ // Magnetic drive (HDD) storage should be used.
+ HDD = 2;
+}
diff --git a/third_party/googleapis/google/bigtable/admin/v2/instance.proto b/third_party/googleapis/google/bigtable/admin/v2/instance.proto
new file mode 100644
index 0000000000..67921d6e15
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/instance.proto
@@ -0,0 +1,130 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/bigtable/admin/v2/common.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "InstanceProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
+// the resources that serve them.
+// All tables in an instance are served from a single
+// [Cluster][google.bigtable.admin.v2.Cluster].
+message Instance {
+ // Possible states of an instance.
+ enum State {
+ // The state of the instance could not be determined.
+ STATE_NOT_KNOWN = 0;
+
+ // The instance has been successfully created and can serve requests
+ // to its tables.
+ READY = 1;
+
+ // The instance is currently being created, and may be destroyed
+ // if the creation process encounters an error.
+ CREATING = 2;
+ }
+
+ // The type of the instance.
+ enum Type {
+ // The type of the instance is unspecified. If set when creating an
+ // instance, a `PRODUCTION` instance will be created. If set when updating
+ // an instance, the type will be left unchanged.
+ TYPE_UNSPECIFIED = 0;
+
+ // An instance meant for production use. `serve_nodes` must be set
+ // on the cluster.
+ PRODUCTION = 1;
+ }
+
+ // (`OutputOnly`)
+ // The unique name of the instance. Values are of the form
+ // `projects/<project>/instances/[a-z][a-z0-9\\-]+[a-z0-9]`.
+ string name = 1;
+
+ // The descriptive name for this instance as it appears in UIs.
+ // Can be changed at any time, but should be kept globally unique
+ // to avoid confusion.
+ string display_name = 2;
+
+ // (`OutputOnly`)
+ // The current state of the instance.
+ State state = 3;
+
+ // The type of the instance. Defaults to `PRODUCTION`.
+ Type type = 4;
+}
+
+// A resizable group of nodes in a particular cloud location, capable
+// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent
+// [Instance][google.bigtable.admin.v2.Instance].
+message Cluster {
+ // Possible states of a cluster.
+ enum State {
+ // The state of the cluster could not be determined.
+ STATE_NOT_KNOWN = 0;
+
+ // The cluster has been successfully created and is ready to serve requests.
+ READY = 1;
+
+ // The cluster is currently being created, and may be destroyed
+ // if the creation process encounters an error.
+ // A cluster may not be able to serve requests while being created.
+ CREATING = 2;
+
+ // The cluster is currently being resized, and may revert to its previous
+ // node count if the process encounters an error.
+ // A cluster is still capable of serving requests while being resized,
+    // but may perform as if its allocated node count were somewhere between
+    // the starting and requested values.
+ RESIZING = 3;
+
+ // The cluster has no backing nodes. The data (tables) still
+ // exist, but no operations can be performed on the cluster.
+ DISABLED = 4;
+ }
+
+ // (`OutputOnly`)
+ // The unique name of the cluster. Values are of the form
+ // `projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*`.
+ string name = 1;
+
+ // (`CreationOnly`)
+ // The location where this cluster's nodes and storage reside. For best
+ // performance, clients should be located as close as possible to this cluster.
+ // Currently only zones are supported, so values should be of the form
+ // `projects/<project>/locations/<zone>`.
+ string location = 2;
+
+ // (`OutputOnly`)
+ // The current state of the cluster.
+ State state = 3;
+
+ // The number of nodes allocated to this cluster. More nodes enable higher
+ // throughput and more consistent performance.
+ int32 serve_nodes = 4;
+
+ // (`CreationOnly`)
+ // The type of storage used by this cluster to serve its
+ // parent instance's tables, unless explicitly overridden.
+ StorageType default_storage_type = 5;
+}
diff --git a/third_party/googleapis/google/bigtable/admin/v2/table.proto b/third_party/googleapis/google/bigtable/admin/v2/table.proto
new file mode 100644
index 0000000000..ce80571f0f
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/table.proto
@@ -0,0 +1,118 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "TableProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// A collection of user data indexed by row, column, and timestamp.
+// Each table is served using the resources of its parent cluster.
+message Table {
+ // Possible timestamp granularities to use when keeping multiple versions
+ // of data in a table.
+ enum TimestampGranularity {
+ // The user did not specify a granularity. Should not be returned.
+ // When specified during table creation, MILLIS will be used.
+ TIMESTAMP_GRANULARITY_UNSPECIFIED = 0;
+
+ // The table keeps data versioned at a granularity of 1ms.
+ MILLIS = 1;
+ }
+
+ // Defines a view over a table's fields.
+ enum View {
+ // Uses the default view for each method as documented in its request.
+ VIEW_UNSPECIFIED = 0;
+
+ // Only populates `name`.
+ NAME_ONLY = 1;
+
+ // Only populates `name` and fields related to the table's schema.
+ SCHEMA_VIEW = 2;
+
+ // Populates all fields.
+ FULL = 4;
+ }
+
+ // (`OutputOnly`)
+ // The unique name of the table. Values are of the form
+ // `projects/<project>/instances/<instance>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
+ // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `FULL`
+ string name = 1;
+
+ // (`CreationOnly`)
+ // The column families configured for this table, mapped by column family ID.
+ // Views: `SCHEMA_VIEW`, `FULL`
+ map<string, ColumnFamily> column_families = 3;
+
+ // (`CreationOnly`)
+ // The granularity (e.g. `MILLIS`, `MICROS`) at which timestamps are stored in
+ // this table. Timestamps not matching the granularity will be rejected.
+ // If unspecified at creation time, the value will be set to `MILLIS`.
+ // Views: `SCHEMA_VIEW`, `FULL`
+ TimestampGranularity granularity = 4;
+}
+
+// A set of columns within a table which share a common configuration.
+message ColumnFamily {
+ // Garbage collection rule specified as a protobuf.
+ // Must serialize to at most 500 bytes.
+ //
+ // NOTE: Garbage collection executes opportunistically in the background, and
+ // so it's possible for reads to return a cell even if it matches the active
+ // GC expression for its family.
+ GcRule gc_rule = 1;
+}
+
+// Rule for determining which cells to delete during garbage collection.
+message GcRule {
+ // A GcRule which deletes cells matching all of the given rules.
+ message Intersection {
+ // Only delete cells which would be deleted by every element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ // A GcRule which deletes cells matching any of the given rules.
+ message Union {
+ // Delete cells which would be deleted by any element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ // Garbage collection rules.
+ oneof rule {
+ // Delete all cells in a column except the most recent N.
+ int32 max_num_versions = 1;
+
+ // Delete cells in a column older than the given age.
+ // Values must be at least one millisecond, and will be truncated to
+ // microsecond granularity.
+ google.protobuf.Duration max_age = 2;
+
+ // Delete cells that would be deleted by every nested rule.
+ Intersection intersection = 3;
+
+ // Delete cells that would be deleted by any nested rule.
+ Union union = 4;
+ }
+}
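For the v2 schema, the Table passed to CreateTable carries its column families and their GC rules directly. A small sketch, assuming the generated com.google.bigtable.admin.v2 classes and a hypothetical family name "cf1", configures a family whose cells are garbage-collected after 30 days:

    import com.google.bigtable.admin.v2.ColumnFamily;
    import com.google.bigtable.admin.v2.GcRule;
    import com.google.bigtable.admin.v2.Table;
    import com.google.protobuf.Duration;

    public final class TableSketch {
      public static void main(String[] args) {
        // Delete cells older than 30 days in family "cf1".
        GcRule maxAge =
            GcRule.newBuilder()
                .setMaxAge(Duration.newBuilder().setSeconds(30L * 24 * 60 * 60))
                .build();

        Table table =
            Table.newBuilder()
                // `name` is OutputOnly; column families are CreationOnly.
                .putColumnFamilies("cf1", ColumnFamily.newBuilder().setGcRule(maxAge).build())
                .build();

        System.out.println(table);
      }
    }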
diff --git a/third_party/googleapis/google/bigtable/bigtable.yaml b/third_party/googleapis/google/bigtable/bigtable.yaml
new file mode 100644
index 0000000000..f0ce19f90d
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/bigtable.yaml
@@ -0,0 +1,33 @@
+# Google Bigtable API service configuration
+
+type: google.api.Service
+config_version: 0
+name: bigtable.googleapis.com
+title: Google Cloud Bigtable API
+
+documentation:
+ summary:
+ Google Cloud Bigtable - http://cloud.google.com/bigtable/
+
+apis:
+- name: google.bigtable.v2.Bigtable
+
+authentication:
+ rules:
+ # Unless explicitly weakened, all ops require write access
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.data,
+ https://www.googleapis.com/auth/cloud-bigtable.data,
+ https://www.googleapis.com/auth/cloud-platform
+
+ # Ops which only require read access
+ - selector: google.bigtable.v2.Bigtable.ReadRows,
+ google.bigtable.v2.Bigtable.SampleRowKeys
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.data,
+ https://www.googleapis.com/auth/bigtable.data.readonly,
+ https://www.googleapis.com/auth/cloud-bigtable.data,
+ https://www.googleapis.com/auth/cloud-bigtable.data.readonly,
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-platform.read-only
diff --git a/third_party/googleapis/google/bigtable/v1/bigtable_data.proto b/third_party/googleapis/google/bigtable/v1/bigtable_data.proto
new file mode 100644
index 0000000000..fe6518ad20
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v1/bigtable_data.proto
@@ -0,0 +1,516 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v1;
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableDataProto";
+option java_package = "com.google.bigtable.v1";
+
+
+// Specifies the complete (requested) contents of a single row of a table.
+// Rows which exceed 256MiB in size cannot be read in full.
+message Row {
+ // The unique key which identifies this row within its table. This is the same
+ // key that's used to identify the row in, for example, a MutateRowRequest.
+ // May contain any non-empty byte string up to 4KiB in length.
+ bytes key = 1;
+
+ // May be empty, but only if the entire row is empty.
+ // The mutual ordering of column families is not specified.
+ repeated Family families = 2;
+}
+
+// Specifies (some of) the contents of a single row/column family of a table.
+message Family {
+ // The unique key which identifies this family within its row. This is the
+ // same key that's used to identify the family in, for example, a RowFilter
+ // which sets its "family_name_regex_filter" field.
+ // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may
+ // produce cells in a sentinel family with an empty name.
+ // Must be no greater than 64 characters in length.
+ string name = 1;
+
+ // Must not be empty. Sorted in order of increasing "qualifier".
+ repeated Column columns = 2;
+}
+
+// Specifies (some of) the contents of a single row/column of a table.
+message Column {
+ // The unique key which identifies this column within its family. This is the
+ // same key that's used to identify the column in, for example, a RowFilter
+ // which sets its "column_qualifier_regex_filter" field.
+  // May contain any byte string, including the empty string, up to 16KiB in
+ // length.
+ bytes qualifier = 1;
+
+ // Must not be empty. Sorted in order of decreasing "timestamp_micros".
+ repeated Cell cells = 2;
+}
+
+// Specifies (some of) the contents of a single row/column/timestamp of a table.
+message Cell {
+ // The cell's stored timestamp, which also uniquely identifies it within
+ // its column.
+ // Values are always expressed in microseconds, but individual tables may set
+ // a coarser "granularity" to further restrict the allowed values. For
+ // example, a table which specifies millisecond granularity will only allow
+ // values of "timestamp_micros" which are multiples of 1000.
+ int64 timestamp_micros = 1;
+
+ // The value stored in the cell.
+ // May contain any byte string, including the empty string, up to 100MiB in
+ // length.
+ bytes value = 2;
+
+ // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
+ repeated string labels = 3;
+}
+
+// Specifies a contiguous range of rows.
+message RowRange {
+ // Inclusive lower bound. If left empty, interpreted as the empty string.
+ bytes start_key = 2;
+
+ // Exclusive upper bound. If left empty, interpreted as infinity.
+ bytes end_key = 3;
+}
+
+// Specifies a non-contiguous set of rows.
+message RowSet {
+ // Single rows included in the set.
+ repeated bytes row_keys = 1;
+
+ // Contiguous row ranges included in the set.
+ repeated RowRange row_ranges = 2;
+}
+
+// Specifies a contiguous range of columns within a single column family.
+// The range spans from <column_family>:<start_qualifier> to
+// <column_family>:<end_qualifier>, where both bounds can be either inclusive or
+// exclusive.
+message ColumnRange {
+ // The name of the column family within which this range falls.
+ string family_name = 1;
+
+ // The column qualifier at which to start the range (within 'column_family').
+ // If neither field is set, interpreted as the empty string, inclusive.
+ oneof start_qualifier {
+ // Used when giving an inclusive lower bound for the range.
+ bytes start_qualifier_inclusive = 2;
+
+ // Used when giving an exclusive lower bound for the range.
+ bytes start_qualifier_exclusive = 3;
+ }
+
+ // The column qualifier at which to end the range (within 'column_family').
+ // If neither field is set, interpreted as the infinite string, exclusive.
+ oneof end_qualifier {
+ // Used when giving an inclusive upper bound for the range.
+ bytes end_qualifier_inclusive = 4;
+
+ // Used when giving an exclusive upper bound for the range.
+ bytes end_qualifier_exclusive = 5;
+ }
+}
+
+// Specifies a contiguous range of microsecond timestamps.
+message TimestampRange {
+ // Inclusive lower bound. If left empty, interpreted as 0.
+ int64 start_timestamp_micros = 1;
+
+ // Exclusive upper bound. If left empty, interpreted as infinity.
+ int64 end_timestamp_micros = 2;
+}
+
+// Specifies a contiguous range of raw byte values.
+message ValueRange {
+ // The value at which to start the range.
+ // If neither field is set, interpreted as the empty string, inclusive.
+ oneof start_value {
+ // Used when giving an inclusive lower bound for the range.
+ bytes start_value_inclusive = 1;
+
+ // Used when giving an exclusive lower bound for the range.
+ bytes start_value_exclusive = 2;
+ }
+
+ // The value at which to end the range.
+ // If neither field is set, interpreted as the infinite string, exclusive.
+ oneof end_value {
+ // Used when giving an inclusive upper bound for the range.
+ bytes end_value_inclusive = 3;
+
+ // Used when giving an exclusive upper bound for the range.
+ bytes end_value_exclusive = 4;
+ }
+}
+
+// Takes a row as input and produces an alternate view of the row based on
+// specified rules. For example, a RowFilter might trim down a row to include
+// just the cells from columns matching a given regular expression, or might
+// return all the cells of a row but not their values. More complicated filters
+// can be composed out of these components to express requests such as, "within
+// every column of a particular family, give just the two most recent cells
+// which are older than timestamp X."
+//
+// There are two broad categories of RowFilters (true filters and transformers),
+// as well as two ways to compose simple filters into more complex ones
+// (chains and interleaves). They work as follows:
+//
+// * True filters alter the input row by excluding some of its cells wholesale
+// from the output row. An example of a true filter is the "value_regex_filter",
+// which excludes cells whose values don't match the specified pattern. All
+// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
+// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
+// important point to keep in mind is that RE2(.) is equivalent by default to
+// RE2([^\n]), meaning that it does not match newlines. When attempting to match
+// an arbitrary byte, you should therefore use the escape sequence '\C', which
+// may need to be further escaped as '\\C' in your client language.
+//
+// * Transformers alter the input row by changing the values of some of its
+// cells in the output, without excluding them completely. Currently, the only
+// supported transformer is the "strip_value_transformer", which replaces every
+// cell's value with the empty string.
+//
+// * Chains and interleaves are described in more detail in the
+// RowFilter.Chain and RowFilter.Interleave documentation.
+//
+// The total serialized size of a RowFilter message must not
+// exceed 4096 bytes, and RowFilters may not be nested within each other
+// (in Chains or Interleaves) to a depth of more than 20.
+message RowFilter {
+ // A RowFilter which sends rows through several RowFilters in sequence.
+ message Chain {
+ // The elements of "filters" are chained together to process the input row:
+ // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
+ // The full chain is executed atomically.
+ repeated RowFilter filters = 1;
+ }
+
+ // A RowFilter which sends each row to each of several component
+ // RowFilters and interleaves the results.
+ message Interleave {
+ // The elements of "filters" all process a copy of the input row, and the
+ // results are pooled, sorted, and combined into a single output row.
+ // If multiple cells are produced with the same column and timestamp,
+ // they will all appear in the output row in an unspecified mutual order.
+ // Consider the following example, with three filters:
+ //
+ // input row
+ // |
+ // -----------------------------------------------------
+ // | | |
+ // f(0) f(1) f(2)
+ // | | |
+ // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a
+ // 2: foo,blah,11,z far,blah,5,x far,blah,5,x
+ // | | |
+ // -----------------------------------------------------
+ // |
+ // 1: foo,bar,10,z // could have switched with #2
+ // 2: foo,bar,10,x // could have switched with #1
+ // 3: foo,blah,11,z
+ // 4: far,bar,7,a
+ // 5: far,blah,5,x // identical to #6
+ // 6: far,blah,5,x // identical to #5
+ // All interleaved filters are executed atomically.
+ repeated RowFilter filters = 1;
+ }
+
+ // A RowFilter which evaluates one of two possible RowFilters, depending on
+ // whether or not a predicate RowFilter outputs any cells from the input row.
+ //
+ // IMPORTANT NOTE: The predicate filter does not execute atomically with the
+ // true and false filters, which may lead to inconsistent or unexpected
+ // results. Additionally, Condition filters have poor performance, especially
+ // when filters are set for the false condition.
+ message Condition {
+ // If "predicate_filter" outputs any cells, then "true_filter" will be
+ // evaluated on the input row. Otherwise, "false_filter" will be evaluated.
+ RowFilter predicate_filter = 1;
+
+ // The filter to apply to the input row if "predicate_filter" returns any
+ // results. If not provided, no results will be returned in the true case.
+ RowFilter true_filter = 2;
+
+ // The filter to apply to the input row if "predicate_filter" does not
+ // return any results. If not provided, no results will be returned in the
+ // false case.
+ RowFilter false_filter = 3;
+ }
+
+ // Which of the possible RowFilter types to apply. If none are set, this
+ // RowFilter returns all cells in the input row.
+ oneof filter {
+ // Applies several RowFilters to the data in sequence, progressively
+ // narrowing the results.
+ Chain chain = 1;
+
+ // Applies several RowFilters to the data in parallel and combines the
+ // results.
+ Interleave interleave = 2;
+
+ // Applies one of two possible RowFilters to the data based on the output of
+ // a predicate RowFilter.
+ Condition condition = 3;
+
+ // ADVANCED USE ONLY.
+ // Hook for introspection into the RowFilter. Outputs all cells directly to
+ // the output of the read rather than to any parent filter. Consider the
+ // following example:
+ //
+ // Chain(
+ // FamilyRegex("A"),
+ // Interleave(
+ // All(),
+ // Chain(Label("foo"), Sink())
+ // ),
+ // QualifierRegex("B")
+ // )
+ //
+ // A,A,1,w
+ // A,B,2,x
+ // B,B,4,z
+ // |
+ // FamilyRegex("A")
+ // |
+ // A,A,1,w
+ // A,B,2,x
+ // |
+ // +------------+-------------+
+ // | |
+ // All() Label(foo)
+ // | |
+ // A,A,1,w A,A,1,w,labels:[foo]
+ // A,B,2,x A,B,2,x,labels:[foo]
+ // | |
+ // | Sink() --------------+
+ // | | |
+ // +------------+ x------+ A,A,1,w,labels:[foo]
+ // | A,B,2,x,labels:[foo]
+ // A,A,1,w |
+ // A,B,2,x |
+ // | |
+ // QualifierRegex("B") |
+ // | |
+ // A,B,2,x |
+ // | |
+ // +--------------------------------+
+ // |
+ // A,A,1,w,labels:[foo]
+ // A,B,2,x,labels:[foo] // could be switched
+ // A,B,2,x // could be switched
+ //
+ // Despite being excluded by the qualifier filter, a copy of every cell
+ // that reaches the sink is present in the final result.
+ //
+ // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave],
+ // duplicate cells are possible, and appear in an unspecified mutual order.
+ // In this case we have a duplicate with column "A:B" and timestamp 2,
+ // because one copy passed through the all filter while the other was
+ // passed through the label and sink. Note that one copy has label "foo",
+ // while the other does not.
+ //
+ // Cannot be used within the `predicate_filter`, `true_filter`, or
+ // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition].
+ bool sink = 16;
+
+ // Matches all cells, regardless of input. Functionally equivalent to
+ // leaving `filter` unset, but included for completeness.
+ bool pass_all_filter = 17;
+
+ // Does not match any cells, regardless of input. Useful for temporarily
+ // disabling just part of a filter.
+ bool block_all_filter = 18;
+
+ // Matches only cells from rows whose keys satisfy the given RE2 regex. In
+ // other words, passes through the entire row when the key matches, and
+ // otherwise produces an empty row.
+ // Note that, since row keys can contain arbitrary bytes, the '\C' escape
+ // sequence must be used if a true wildcard is desired. The '.' character
+ // will not match the new line character '\n', which may be present in a
+ // binary key.
+ bytes row_key_regex_filter = 4;
+
+ // Matches all cells from a row with probability p, and matches no cells
+ // from the row with probability 1-p.
+ double row_sample_filter = 14;
+
+ // Matches only cells from columns whose families satisfy the given RE2
+ // regex. For technical reasons, the regex must not contain the ':'
+ // character, even if it is not being used as a literal.
+ // Note that, since column families cannot contain the new line character
+ // '\n', it is sufficient to use '.' as a full wildcard when matching
+ // column family names.
+ string family_name_regex_filter = 5;
+
+ // Matches only cells from columns whose qualifiers satisfy the given RE2
+ // regex.
+ // Note that, since column qualifiers can contain arbitrary bytes, the '\C'
+ // escape sequence must be used if a true wildcard is desired. The '.'
+ // character will not match the new line character '\n', which may be
+ // present in a binary qualifier.
+ bytes column_qualifier_regex_filter = 6;
+
+ // Matches only cells from columns within the given range.
+ ColumnRange column_range_filter = 7;
+
+ // Matches only cells with timestamps within the given range.
+ TimestampRange timestamp_range_filter = 8;
+
+ // Matches only cells with values that satisfy the given regular expression.
+ // Note that, since cell values can contain arbitrary bytes, the '\C' escape
+ // sequence must be used if a true wildcard is desired. The '.' character
+ // will not match the new line character '\n', which may be present in a
+ // binary value.
+ bytes value_regex_filter = 9;
+
+ // Matches only cells with values that fall within the given range.
+ ValueRange value_range_filter = 15;
+
+ // Skips the first N cells of each row, matching all subsequent cells.
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_row_offset_filter = 10;
+
+ // Matches only the first N cells of each row.
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_row_limit_filter = 11;
+
+ // Matches only the most recent N cells within each column. For example,
+ // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
+ // skip all earlier cells in "foo:bar", and then begin matching again in
+ // column "foo:bar2".
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_column_limit_filter = 12;
+
+ // Replaces each cell's value with the empty string.
+ bool strip_value_transformer = 13;
+
+ // Applies the given label to all cells in the output row. This allows
+ // the client to determine which results were produced from which part of
+ // the filter.
+ //
+ // Values must be at most 15 characters in length, and match the RE2
+ // pattern [a-z0-9\\-]+
+ //
+ // Due to a technical limitation, it is not currently possible to apply
+ // multiple labels to a cell. As a result, a Chain may have no more than
+    // one sub-filter which contains an apply_label_transformer. It is okay for
+ // an Interleave to contain multiple apply_label_transformers, as they will
+ // be applied to separate copies of the input. This may be relaxed in the
+ // future.
+ string apply_label_transformer = 19;
+ }
+}
+
+// Specifies a particular change to be made to the contents of a row.
+message Mutation {
+ // A Mutation which sets the value of the specified cell.
+ message SetCell {
+ // The name of the family into which new data should be written.
+ // Must match [-_.a-zA-Z0-9]+
+ string family_name = 1;
+
+ // The qualifier of the column into which new data should be written.
+ // Can be any byte string, including the empty string.
+ bytes column_qualifier = 2;
+
+ // The timestamp of the cell into which new data should be written.
+ // Use -1 for current Bigtable server time.
+ // Otherwise, the client should set this value itself, noting that the
+ // default value is a timestamp of zero if the field is left unspecified.
+ // Values must match the "granularity" of the table (e.g. micros, millis).
+ int64 timestamp_micros = 3;
+
+ // The value to be written into the specified cell.
+ bytes value = 4;
+ }
+
+ // A Mutation which deletes cells from the specified column, optionally
+ // restricting the deletions to a given timestamp range.
+ message DeleteFromColumn {
+ // The name of the family from which cells should be deleted.
+ // Must match [-_.a-zA-Z0-9]+
+ string family_name = 1;
+
+ // The qualifier of the column from which cells should be deleted.
+ // Can be any byte string, including the empty string.
+ bytes column_qualifier = 2;
+
+ // The range of timestamps within which cells should be deleted.
+ TimestampRange time_range = 3;
+ }
+
+ // A Mutation which deletes all cells from the specified column family.
+ message DeleteFromFamily {
+ // The name of the family from which cells should be deleted.
+ // Must match [-_.a-zA-Z0-9]+
+ string family_name = 1;
+ }
+
+ // A Mutation which deletes all cells from the containing row.
+ message DeleteFromRow {
+
+ }
+
+ // Which of the possible Mutation types to apply.
+ oneof mutation {
+ // Set a cell's value.
+ SetCell set_cell = 1;
+
+ // Deletes cells from a column.
+ DeleteFromColumn delete_from_column = 2;
+
+ // Deletes cells from a column family.
+ DeleteFromFamily delete_from_family = 3;
+
+ // Deletes cells from the entire row.
+ DeleteFromRow delete_from_row = 4;
+ }
+}
+
+// Specifies an atomic read/modify/write operation on the latest value of the
+// specified column.
+message ReadModifyWriteRule {
+ // The name of the family to which the read/modify/write should be applied.
+ // Must match [-_.a-zA-Z0-9]+
+ string family_name = 1;
+
+ // The qualifier of the column to which the read/modify/write should be
+ // applied.
+ // Can be any byte string, including the empty string.
+ bytes column_qualifier = 2;
+
+ // The rule used to determine the column's new latest value from its current
+ // latest value.
+ oneof rule {
+ // Rule specifying that "append_value" be appended to the existing value.
+ // If the targeted cell is unset, it will be treated as containing the
+ // empty string.
+ bytes append_value = 3;
+
+ // Rule specifying that "increment_amount" be added to the existing value.
+ // If the targeted cell is unset, it will be treated as containing a zero.
+ // Otherwise, the targeted cell must contain an 8-byte value (interpreted
+ // as a 64-bit big-endian signed integer), or the entire request will fail.
+ int64 increment_amount = 4;
+ }
+}
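+
+// For illustration, a ReadModifyWriteRule that atomically increments a 64-bit
+// counter might look like the following in protobuf text format (the family
+// and qualifier names are placeholders):
+//
+//   family_name: "stats"
+//   column_qualifier: "visits"
+//   increment_amount: 1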
diff --git a/third_party/googleapis/google/bigtable/v1/bigtable_service.proto b/third_party/googleapis/google/bigtable/v1/bigtable_service.proto
new file mode 100644
index 0000000000..6d41a1b842
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v1/bigtable_service.proto
@@ -0,0 +1,74 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v1;
+
+import "google/api/annotations.proto";
+import "google/bigtable/v1/bigtable_data.proto";
+import "google/bigtable/v1/bigtable_service_messages.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
+option java_generic_services = true;
+option java_multiple_files = true;
+option java_outer_classname = "BigtableServicesProto";
+option java_package = "com.google.bigtable.v1";
+
+
+// Service for reading from and writing to existing Bigtables.
+service BigtableService {
+ // Streams back the contents of all requested rows, optionally applying
+ // the same Reader filter to each. Depending on their size, rows may be
+ // broken up across multiple responses, but atomicity of each row will still
+ // be preserved.
+ rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+ option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" body: "*" };
+ }
+
+ // Returns a sample of row keys in the table. The returned row keys will
+ // delimit contiguous sections of the table of approximately equal size,
+ // which can be used to break up the data for distributed tasks like
+ // mapreduces.
+ rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
+ option (google.api.http) = { get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" };
+ }
+
+ // Mutates a row atomically. Cells already present in the row are left
+ // unchanged unless explicitly changed by 'mutation'.
+ rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" };
+ }
+
+ // Mutates multiple rows in a batch. Each individual row is mutated
+ // atomically as in MutateRow, but the entire batch is not executed
+ // atomically.
+ rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
+ option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" };
+ }
+
+ // Mutates a row atomically based on the output of a predicate Reader filter.
+ rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
+ option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" };
+ }
+
+ // Modifies a row atomically, reading the latest existing timestamp/value from
+ // the specified columns and writing a new value at
+ // max(existing timestamp, current server time) based on pre-defined
+ // read/modify/write rules. Returns the new contents of all modified cells.
+ rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {
+ option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" body: "*" };
+ }
+}
diff --git a/third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto b/third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto
new file mode 100644
index 0000000000..6d75af78e1
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto
@@ -0,0 +1,218 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v1;
+
+import "google/bigtable/v1/bigtable_data.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableServiceMessagesProto";
+option java_package = "com.google.bigtable.v1";
+
+
+// Request message for BigtableService.ReadRows.
+message ReadRowsRequest {
+ // The unique name of the table from which to read.
+ string table_name = 1;
+
+  // If none of row_key, row_range, or row_set is set, reads from all rows.
+ oneof target {
+ // The key of a single row from which to read.
+ bytes row_key = 2;
+
+ // A range of rows from which to read.
+ RowRange row_range = 3;
+
+ // A set of rows from which to read. Entries need not be in order, and will
+ // be deduplicated before reading.
+ // The total serialized size of the set must not exceed 1MB.
+ RowSet row_set = 8;
+ }
+
+ // The filter to apply to the contents of the specified row(s). If unset,
+  // reads the entirety of each row.
+ RowFilter filter = 5;
+
+ // By default, rows are read sequentially, producing results which are
+ // guaranteed to arrive in increasing row order. Setting
+ // "allow_row_interleaving" to true allows multiple rows to be interleaved in
+ // the response stream, which increases throughput but breaks this guarantee,
+ // and may force the client to use more memory to buffer partially-received
+ // rows. Cannot be set to true when specifying "num_rows_limit".
+ bool allow_row_interleaving = 6;
+
+ // The read will terminate after committing to N rows' worth of results. The
+ // default (zero) is to return all results.
+ // Note that "allow_row_interleaving" cannot be set to true when this is set.
+ int64 num_rows_limit = 7;
+}
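+
+// For illustration, a ReadRowsRequest that reads a single row and keeps only
+// cells from one column family might look like the following in protobuf text
+// format (all names below are placeholders):
+//
+//   table_name: "projects/p/zones/z/clusters/c/tables/t"
+//   row_key: "user#1234"
+//   filter { family_name_regex_filter: "cf1" }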
+
+// Response message for BigtableService.ReadRows.
+message ReadRowsResponse {
+ // Specifies a piece of a row's contents returned as part of the read
+ // response stream.
+ message Chunk {
+ oneof chunk {
+ // A subset of the data from a particular row. As long as no "reset_row"
+ // is received in between, multiple "row_contents" from the same row are
+ // from the same atomic view of that row, and will be received in the
+ // expected family/column/timestamp order.
+ Family row_contents = 1;
+
+ // Indicates that the client should drop all previous chunks for
+ // "row_key", as it will be re-read from the beginning.
+ bool reset_row = 2;
+
+ // Indicates that the client can safely process all previous chunks for
+ // "row_key", as its data has been fully read.
+ bool commit_row = 3;
+ }
+ }
+
+ // The key of the row for which we're receiving data.
+ // Results will be received in increasing row key order, unless
+ // "allow_row_interleaving" was specified in the request.
+ bytes row_key = 1;
+
+ // One or more chunks of the row specified by "row_key".
+ repeated Chunk chunks = 2;
+}
+
+// Request message for BigtableService.SampleRowKeys.
+message SampleRowKeysRequest {
+ // The unique name of the table from which to sample row keys.
+ string table_name = 1;
+}
+
+// Response message for BigtableService.SampleRowKeys.
+message SampleRowKeysResponse {
+ // Sorted streamed sequence of sample row keys in the table. The table might
+ // have contents before the first row key in the list and after the last one,
+ // but a key containing the empty string indicates "end of table" and will be
+ // the last response given, if present.
+ // Note that row keys in this list may not have ever been written to or read
+ // from, and users should therefore not make any assumptions about the row key
+ // structure that are specific to their use case.
+ bytes row_key = 1;
+
+ // Approximate total storage space used by all rows in the table which precede
+ // "row_key". Buffering the contents of all rows between two subsequent
+ // samples would require space roughly equal to the difference in their
+ // "offset_bytes" fields.
+ int64 offset_bytes = 2;
+}
+
+// Request message for BigtableService.MutateRow.
+message MutateRowRequest {
+ // The unique name of the table to which the mutation should be applied.
+ string table_name = 1;
+
+ // The key of the row to which the mutation should be applied.
+ bytes row_key = 2;
+
+ // Changes to be atomically applied to the specified row. Entries are applied
+ // in order, meaning that earlier mutations can be masked by later ones.
+ // Must contain at least one entry and at most 100000.
+ repeated Mutation mutations = 3;
+}
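+
+// For illustration, a MutateRowRequest that writes one cell at the current
+// server time might look like the following in protobuf text format (all
+// names below are placeholders):
+//
+//   table_name: "projects/p/zones/z/clusters/c/tables/t"
+//   row_key: "user#1234"
+//   mutations {
+//     set_cell {
+//       family_name: "cf1"
+//       column_qualifier: "email"
+//       timestamp_micros: -1
+//       value: "someone@example.com"
+//     }
+//   }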
+
+// Request message for BigtableService.MutateRows.
+message MutateRowsRequest {
+ message Entry {
+ // The key of the row to which the `mutations` should be applied.
+ bytes row_key = 1;
+
+ // Changes to be atomically applied to the specified row. Mutations are
+ // applied in order, meaning that earlier mutations can be masked by
+ // later ones.
+ // At least one mutation must be specified.
+ repeated Mutation mutations = 2;
+ }
+
+ // The unique name of the table to which the mutations should be applied.
+ string table_name = 1;
+
+ // The row keys/mutations to be applied in bulk.
+ // Each entry is applied as an atomic mutation, but the entries may be
+ // applied in arbitrary order (even between entries for the same row).
+ // At least one entry must be specified, and in total the entries may
+ // contain at most 100000 mutations.
+ repeated Entry entries = 2;
+}
+
+// Response message for BigtableService.MutateRows.
+message MutateRowsResponse {
+ // The results for each Entry from the request, presented in the order
+ // in which the entries were originally given.
+ // Depending on how requests are batched during execution, it is possible
+ // for one Entry to fail due to an error with another Entry. In the event
+ // that this occurs, the same error will be reported for both entries.
+ repeated google.rpc.Status statuses = 1;
+}
+
+// Request message for BigtableService.CheckAndMutateRow.
+message CheckAndMutateRowRequest {
+ // The unique name of the table to which the conditional mutation should be
+ // applied.
+ string table_name = 1;
+
+ // The key of the row to which the conditional mutation should be applied.
+ bytes row_key = 2;
+
+ // The filter to be applied to the contents of the specified row. Depending
+ // on whether or not any results are yielded, either "true_mutations" or
+ // "false_mutations" will be executed. If unset, checks that the row contains
+ // any values at all.
+ RowFilter predicate_filter = 6;
+
+ // Changes to be atomically applied to the specified row if "predicate_filter"
+ // yields at least one cell when applied to "row_key". Entries are applied in
+ // order, meaning that earlier mutations can be masked by later ones.
+ // Must contain at least one entry if "false_mutations" is empty, and at most
+ // 100000.
+ repeated Mutation true_mutations = 4;
+
+ // Changes to be atomically applied to the specified row if "predicate_filter"
+ // does not yield any cells when applied to "row_key". Entries are applied in
+ // order, meaning that earlier mutations can be masked by later ones.
+ // Must contain at least one entry if "true_mutations" is empty, and at most
+ // 100000.
+ repeated Mutation false_mutations = 5;
+}
+
+// Response message for BigtableService.CheckAndMutateRow.
+message CheckAndMutateRowResponse {
+ // Whether or not the request's "predicate_filter" yielded any results for
+ // the specified row.
+ bool predicate_matched = 1;
+}
+
+// Request message for BigtableService.ReadModifyWriteRow.
+message ReadModifyWriteRowRequest {
+ // The unique name of the table to which the read/modify/write rules should be
+ // applied.
+ string table_name = 1;
+
+ // The key of the row to which the read/modify/write rules should be applied.
+ bytes row_key = 2;
+
+ // Rules specifying how the specified row's contents are to be transformed
+ // into writes. Entries are applied in order, meaning that earlier rules will
+ // affect the results of later ones.
+ repeated ReadModifyWriteRule rules = 3;
+}
diff --git a/third_party/googleapis/google/bigtable/v2/bigtable.proto b/third_party/googleapis/google/bigtable/v2/bigtable.proto
new file mode 100644
index 0000000000..5e8859ed52
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v2/bigtable.proto
@@ -0,0 +1,322 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v2;
+
+import "google/api/annotations.proto";
+import "google/bigtable/v2/data.proto";
+import "google/protobuf/wrappers.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableProto";
+option java_package = "com.google.bigtable.v2";
+
+
+// Service for reading from and writing to existing Bigtable tables.
+service Bigtable {
+ // Streams back the contents of all requested rows, optionally
+ // applying the same Reader filter to each. Depending on their size,
+ // rows and cells may be broken up across multiple responses, but
+ // atomicity of each row will still be preserved. See the
+ // ReadRowsResponse documentation for details.
+ rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+ option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" };
+ }
+
+ // Returns a sample of row keys in the table. The returned row keys will
+ // delimit contiguous sections of the table of approximately equal size,
+ // which can be used to break up the data for distributed tasks like
+ // mapreduces.
+ rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
+ option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" };
+ }
+
+ // Mutates a row atomically. Cells already present in the row are left
+ // unchanged unless explicitly changed by `mutation`.
+ rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) {
+ option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" };
+ }
+
+ // Mutates multiple rows in a batch. Each individual row is mutated
+ // atomically as in MutateRow, but the entire batch is not executed
+ // atomically.
+ rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) {
+ option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" };
+ }
+
+ // Mutates a row atomically based on the output of a predicate Reader filter.
+ rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
+ option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" };
+ }
+
+ // Modifies a row atomically. The method reads the latest existing timestamp
+ // and value from the specified columns and writes a new entry based on
+ // pre-defined read/modify/write rules. The new value for the timestamp is the
+ // greater of the existing timestamp or the current server time. The method
+ // returns the new contents of all modified cells.
+ rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) {
+ option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" };
+ }
+}
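+
+// For illustration, given the HTTP mappings above, a ReadRows call for a table
+// named "projects/my-project/instances/my-instance/tables/my-table" (a
+// placeholder name) would be transcoded to
+//
+//   POST /v2/projects/my-project/instances/my-instance/tables/my-table:readRows
+//
+// with the remaining request fields carried in the request body.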
+
+// Request message for Bigtable.ReadRows.
+message ReadRowsRequest {
+ // The unique name of the table from which to read.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string table_name = 1;
+
+ // The row keys and/or ranges to read. If not specified, reads from all rows.
+ RowSet rows = 2;
+
+ // The filter to apply to the contents of the specified row(s). If unset,
+ // reads the entirety of each row.
+ RowFilter filter = 3;
+
+ // The read will terminate after committing to N rows' worth of results. The
+ // default (zero) is to return all results.
+ int64 rows_limit = 4;
+}
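+
+// For illustration, a ReadRowsRequest that scans a key range through a
+// value-stripping filter and stops after 100 rows might look like the
+// following in protobuf text format (all names below are placeholders):
+//
+//   table_name: "projects/p/instances/i/tables/t"
+//   rows {
+//     row_ranges {
+//       start_key_closed: "user#1000"
+//       end_key_open: "user#2000"
+//     }
+//   }
+//   filter { strip_value_transformer: true }
+//   rows_limit: 100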
+
+// Response message for Bigtable.ReadRows.
+message ReadRowsResponse {
+ // Specifies a piece of a row's contents returned as part of the read
+ // response stream.
+ message CellChunk {
+ // The row key for this chunk of data. If the row key is empty,
+ // this CellChunk is a continuation of the same row as the previous
+ // CellChunk in the response stream, even if that CellChunk was in a
+ // previous ReadRowsResponse message.
+ bytes row_key = 1;
+
+ // The column family name for this chunk of data. If this message
+ // is not present this CellChunk is a continuation of the same column
+ // family as the previous CellChunk. The empty string can occur as a
+ // column family name in a response so clients must check
+ // explicitly for the presence of this message, not just for
+ // `family_name.value` being non-empty.
+ google.protobuf.StringValue family_name = 2;
+
+ // The column qualifier for this chunk of data. If this message
+ // is not present, this CellChunk is a continuation of the same column
+ // as the previous CellChunk. Column qualifiers may be empty so
+ // clients must check for the presence of this message, not just
+ // for `qualifier.value` being non-empty.
+ google.protobuf.BytesValue qualifier = 3;
+
+ // The cell's stored timestamp, which also uniquely identifies it
+ // within its column. Values are always expressed in
+ // microseconds, but individual tables may set a coarser
+ // granularity to further restrict the allowed values. For
+ // example, a table which specifies millisecond granularity will
+ // only allow values of `timestamp_micros` which are multiples of
+ // 1000. Timestamps are only set in the first CellChunk per cell
+ // (for cells split into multiple chunks).
+ int64 timestamp_micros = 4;
+
+ // Labels applied to the cell by a
+ // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
+ // on the first CellChunk per cell.
+ repeated string labels = 5;
+
+ // The value stored in the cell. Cell values can be split across
+ // multiple CellChunks. In that case only the value field will be
+ // set in CellChunks after the first: the timestamp and labels
+ // will only be present in the first CellChunk, even if the first
+ // CellChunk came in a previous ReadRowsResponse.
+ bytes value = 6;
+
+ // If this CellChunk is part of a chunked cell value and this is
+ // not the final chunk of that cell, value_size will be set to the
+ // total length of the cell value. The client can use this size
+ // to pre-allocate memory to hold the full cell value.
+ int32 value_size = 7;
+
+ oneof row_status {
+ // Indicates that the client should drop all previous chunks for
+ // `row_key`, as it will be re-read from the beginning.
+ bool reset_row = 8;
+
+ // Indicates that the client can safely process all previous chunks for
+ // `row_key`, as its data has been fully read.
+ bool commit_row = 9;
+ }
+ }
+
+ repeated CellChunk chunks = 1;
+
+ // Optionally the server might return the row key of the last row it
+ // has scanned. The client can use this to construct a more
+ // efficient retry request if needed: any row keys or portions of
+ // ranges less than this row key can be dropped from the request.
+ // This is primarily useful for cases where the server has read a
+ // lot of data that was filtered out since the last committed row
+ // key, allowing the client to skip that work on a retry.
+ bytes last_scanned_row_key = 2;
+}
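+
+// For illustration, a single cell whose value is split across two chunks might
+// arrive as the following sequence (placeholder names; the first chunk carries
+// the key, column, timestamp, and total value_size, the second only the
+// remaining bytes and the commit flag):
+//
+//   chunks {
+//     row_key: "user#1234"
+//     family_name { value: "cf1" }
+//     qualifier { value: "blob" }
+//     timestamp_micros: 1000
+//     value: "first-half-"
+//     value_size: 22
+//   }
+//   chunks {
+//     value: "second-half"
+//     commit_row: true
+//   }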
+
+// Request message for Bigtable.SampleRowKeys.
+message SampleRowKeysRequest {
+ // The unique name of the table from which to sample row keys.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string table_name = 1;
+}
+
+// Response message for Bigtable.SampleRowKeys.
+message SampleRowKeysResponse {
+ // Sorted streamed sequence of sample row keys in the table. The table might
+ // have contents before the first row key in the list and after the last one,
+ // but a key containing the empty string indicates "end of table" and will be
+ // the last response given, if present.
+ // Note that row keys in this list may not have ever been written to or read
+ // from, and users should therefore not make any assumptions about the row key
+ // structure that are specific to their use case.
+ bytes row_key = 1;
+
+ // Approximate total storage space used by all rows in the table which precede
+ // `row_key`. Buffering the contents of all rows between two subsequent
+ // samples would require space roughly equal to the difference in their
+ // `offset_bytes` fields.
+ int64 offset_bytes = 2;
+}
+
+// Request message for Bigtable.MutateRow.
+message MutateRowRequest {
+ // The unique name of the table to which the mutation should be applied.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string table_name = 1;
+
+ // The key of the row to which the mutation should be applied.
+ bytes row_key = 2;
+
+ // Changes to be atomically applied to the specified row. Entries are applied
+ // in order, meaning that earlier mutations can be masked by later ones.
+ // Must contain at least one entry and at most 100000.
+ repeated Mutation mutations = 3;
+}
+
+// Response message for Bigtable.MutateRow.
+message MutateRowResponse {
+
+}
+
+// Request message for Bigtable.MutateRows.
+message MutateRowsRequest {
+ message Entry {
+ // The key of the row to which the `mutations` should be applied.
+ bytes row_key = 1;
+
+ // Changes to be atomically applied to the specified row. Mutations are
+ // applied in order, meaning that earlier mutations can be masked by
+ // later ones.
+ // You must specify at least one mutation.
+ repeated Mutation mutations = 2;
+ }
+
+ // The unique name of the table to which the mutations should be applied.
+ string table_name = 1;
+
+ // The row keys and corresponding mutations to be applied in bulk.
+ // Each entry is applied as an atomic mutation, but the entries may be
+ // applied in arbitrary order (even between entries for the same row).
+ // At least one entry must be specified, and in total the entries can
+ // contain at most 100000 mutations.
+ repeated Entry entries = 2;
+}
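+
+// For illustration, a MutateRowsRequest that deletes one row and clears a
+// column family on another might look like the following in protobuf text
+// format (all names below are placeholders):
+//
+//   table_name: "projects/p/instances/i/tables/t"
+//   entries {
+//     row_key: "user#1111"
+//     mutations { delete_from_row { } }
+//   }
+//   entries {
+//     row_key: "user#2222"
+//     mutations { delete_from_family { family_name: "cf1" } }
+//   }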
+
+// Response message for Bigtable.MutateRows.
+message MutateRowsResponse {
+ message Entry {
+ // The index into the original request's `entries` list of the Entry
+ // for which a result is being reported.
+ int64 index = 1;
+
+ // The result of the request Entry identified by `index`.
+ // Depending on how requests are batched during execution, it is possible
+ // for one Entry to fail due to an error with another Entry. In the event
+ // that this occurs, the same error will be reported for both entries.
+ google.rpc.Status status = 2;
+ }
+
+ // One or more results for Entries from the batch request.
+ repeated Entry entries = 1;
+}
+
+// Request message for Bigtable.CheckAndMutateRow.
+message CheckAndMutateRowRequest {
+ // The unique name of the table to which the conditional mutation should be
+ // applied.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string table_name = 1;
+
+ // The key of the row to which the conditional mutation should be applied.
+ bytes row_key = 2;
+
+ // The filter to be applied to the contents of the specified row. Depending
+ // on whether or not any results are yielded, either `true_mutations` or
+ // `false_mutations` will be executed. If unset, checks that the row contains
+ // any values at all.
+ RowFilter predicate_filter = 6;
+
+ // Changes to be atomically applied to the specified row if `predicate_filter`
+ // yields at least one cell when applied to `row_key`. Entries are applied in
+ // order, meaning that earlier mutations can be masked by later ones.
+ // Must contain at least one entry if `false_mutations` is empty, and at most
+ // 100000.
+ repeated Mutation true_mutations = 4;
+
+ // Changes to be atomically applied to the specified row if `predicate_filter`
+ // does not yield any cells when applied to `row_key`. Entries are applied in
+ // order, meaning that earlier mutations can be masked by later ones.
+ // Must contain at least one entry if `true_mutations` is empty, and at most
+ // 100000.
+ repeated Mutation false_mutations = 5;
+}
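+
+// For illustration, a conditional mutation that deletes a column only if the
+// row currently contains at least one cell in that column might look like the
+// following in protobuf text format (all names below are placeholders):
+//
+//   table_name: "projects/p/instances/i/tables/t"
+//   row_key: "user#1234"
+//   predicate_filter {
+//     chain {
+//       filters { family_name_regex_filter: "cf1" }
+//       filters { column_qualifier_regex_filter: "flag" }
+//     }
+//   }
+//   true_mutations {
+//     delete_from_column { family_name: "cf1" column_qualifier: "flag" }
+//   }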
+
+// Response message for Bigtable.CheckAndMutateRow.
+message CheckAndMutateRowResponse {
+ // Whether or not the request's `predicate_filter` yielded any results for
+ // the specified row.
+ bool predicate_matched = 1;
+}
+
+// Request message for Bigtable.ReadModifyWriteRow.
+message ReadModifyWriteRowRequest {
+ // The unique name of the table to which the read/modify/write rules should be
+ // applied.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string table_name = 1;
+
+ // The key of the row to which the read/modify/write rules should be applied.
+ bytes row_key = 2;
+
+ // Rules specifying how the specified row's contents are to be transformed
+ // into writes. Entries are applied in order, meaning that earlier rules will
+ // affect the results of later ones.
+ repeated ReadModifyWriteRule rules = 3;
+}
+
+// Response message for Bigtable.ReadModifyWriteRow.
+message ReadModifyWriteRowResponse {
+ // A Row containing the new contents of all cells modified by the request.
+ Row row = 1;
+}
diff --git a/third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml b/third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml
new file mode 100644
index 0000000000..24a067fd5e
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml
@@ -0,0 +1,137 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.bigtable.spi.v2
+ python:
+ package_name: google.cloud.gapic.bigtable.v2
+ go:
+ package_name: cloud.google.com/go/bigtable/apiv2
+ csharp:
+ package_name: Google.Bigtable.V2
+ ruby:
+ package_name: Google::Cloud::Bigtable::V2
+ php:
+ package_name: Google\Cloud\Bigtable\V2
+ nodejs:
+ package_name: bigtable.v2
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.bigtable.v2.Bigtable
+ collections:
+ - name_pattern: projects/{project}/instances/{instance}/tables/{table}
+ entity_name: table
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
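+  # With these defaults, retry delays start at 100ms and grow by a factor of
+  # 1.3 per attempt (100ms, 130ms, 169ms, ...) up to a 60s cap, each attempt
+  # gets a fixed 20s RPC deadline, and the call as a whole gives up after 600s.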
+ methods:
+ - name: ReadRows
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ required_fields:
+ - table_name
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: SampleRowKeys
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ required_fields:
+ - table_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: MutateRow
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - row_key
+ - mutations
+ required_fields:
+ - table_name
+ - row_key
+ - mutations
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: MutateRows
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - entries
+ required_fields:
+ - table_name
+ - entries
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: CheckAndMutateRow
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - row_key
+ - true_mutations
+ - false_mutations
+ # Note that one of {true_mutations,false_mutations} must be specified, but
+ # since they are not both required, we leave them as optional params.
+ required_fields:
+ - table_name
+ - row_key
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: ReadModifyWriteRow
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - row_key
+ - rules
+ required_fields:
+ - table_name
+ - row_key
+ - rules
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
diff --git a/third_party/googleapis/google/bigtable/v2/data.proto b/third_party/googleapis/google/bigtable/v2/data.proto
new file mode 100644
index 0000000000..b9eab6f7c1
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v2/data.proto
@@ -0,0 +1,533 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v2;
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "DataProto";
+option java_package = "com.google.bigtable.v2";
+
+
+// Specifies the complete (requested) contents of a single row of a table.
+// Rows which exceed 256MiB in size cannot be read in full.
+message Row {
+ // The unique key which identifies this row within its table. This is the same
+ // key that's used to identify the row in, for example, a MutateRowRequest.
+ // May contain any non-empty byte string up to 4KiB in length.
+ bytes key = 1;
+
+ // May be empty, but only if the entire row is empty.
+ // The mutual ordering of column families is not specified.
+ repeated Family families = 2;
+}
+
+// Specifies (some of) the contents of a single row/column family intersection
+// of a table.
+message Family {
+ // The unique key which identifies this family within its row. This is the
+ // same key that's used to identify the family in, for example, a RowFilter
+ // which sets its "family_name_regex_filter" field.
+ // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may
+ // produce cells in a sentinel family with an empty name.
+ // Must be no greater than 64 characters in length.
+ string name = 1;
+
+ // Must not be empty. Sorted in order of increasing "qualifier".
+ repeated Column columns = 2;
+}
+
+// Specifies (some of) the contents of a single row/column intersection of a
+// table.
+message Column {
+ // The unique key which identifies this column within its family. This is the
+ // same key that's used to identify the column in, for example, a RowFilter
+ // which sets its `column_qualifier_regex_filter` field.
+  // May contain any byte string, including the empty string, up to 16KiB in
+ // length.
+ bytes qualifier = 1;
+
+ // Must not be empty. Sorted in order of decreasing "timestamp_micros".
+ repeated Cell cells = 2;
+}
+
+// Specifies (some of) the contents of a single row/column/timestamp of a table.
+message Cell {
+ // The cell's stored timestamp, which also uniquely identifies it within
+ // its column.
+ // Values are always expressed in microseconds, but individual tables may set
+ // a coarser granularity to further restrict the allowed values. For
+ // example, a table which specifies millisecond granularity will only allow
+ // values of `timestamp_micros` which are multiples of 1000.
+ int64 timestamp_micros = 1;
+
+ // The value stored in the cell.
+ // May contain any byte string, including the empty string, up to 100MiB in
+ // length.
+ bytes value = 2;
+
+ // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter].
+ repeated string labels = 3;
+}
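+
+// For illustration, a Row holding a single cell might look like the following
+// in protobuf text format (all names below are placeholders):
+//
+//   key: "r1"
+//   families {
+//     name: "cf1"
+//     columns {
+//       qualifier: "greeting"
+//       cells { timestamp_micros: 0 value: "hello" }
+//     }
+//   }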
+
+// Specifies a contiguous range of rows.
+message RowRange {
+ // The row key at which to start the range.
+ // If neither field is set, interpreted as the empty string, inclusive.
+ oneof start_key {
+ // Used when giving an inclusive lower bound for the range.
+ bytes start_key_closed = 1;
+
+ // Used when giving an exclusive lower bound for the range.
+ bytes start_key_open = 2;
+ }
+
+ // The row key at which to end the range.
+ // If neither field is set, interpreted as the infinite row key, exclusive.
+ oneof end_key {
+ // Used when giving an exclusive upper bound for the range.
+ bytes end_key_open = 3;
+
+ // Used when giving an inclusive upper bound for the range.
+ bytes end_key_closed = 4;
+ }
+}
+
+// Specifies a non-contiguous set of rows.
+message RowSet {
+ // Single rows included in the set.
+ repeated bytes row_keys = 1;
+
+ // Contiguous row ranges included in the set.
+ repeated RowRange row_ranges = 2;
+}
+
+// Specifies a contiguous range of columns within a single column family.
+// The range spans from `<column_family>:<start_qualifier>` to
+// `<column_family>:<end_qualifier>`, where both bounds can be either
+// inclusive or exclusive.
+message ColumnRange {
+ // The name of the column family within which this range falls.
+ string family_name = 1;
+
+ // The column qualifier at which to start the range (within `column_family`).
+ // If neither field is set, interpreted as the empty string, inclusive.
+ oneof start_qualifier {
+ // Used when giving an inclusive lower bound for the range.
+ bytes start_qualifier_closed = 2;
+
+ // Used when giving an exclusive lower bound for the range.
+ bytes start_qualifier_open = 3;
+ }
+
+ // The column qualifier at which to end the range (within `column_family`).
+ // If neither field is set, interpreted as the infinite string, exclusive.
+ oneof end_qualifier {
+ // Used when giving an inclusive upper bound for the range.
+ bytes end_qualifier_closed = 4;
+
+ // Used when giving an exclusive upper bound for the range.
+ bytes end_qualifier_open = 5;
+ }
+}
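+
+// For illustration, a ColumnRange covering columns "cf1:a" (inclusive) through
+// "cf1:d" (exclusive) might look like the following in protobuf text format
+// (the family and qualifier names are placeholders):
+//
+//   family_name: "cf1"
+//   start_qualifier_closed: "a"
+//   end_qualifier_open: "d"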
+
+// Specifies a contiguous range of microsecond timestamps.
+message TimestampRange {
+ // Inclusive lower bound. If left empty, interpreted as 0.
+ int64 start_timestamp_micros = 1;
+
+ // Exclusive upper bound. If left empty, interpreted as infinity.
+ int64 end_timestamp_micros = 2;
+}
+
+// Specifies a contiguous range of raw byte values.
+message ValueRange {
+ // The value at which to start the range.
+ // If neither field is set, interpreted as the empty string, inclusive.
+ oneof start_value {
+ // Used when giving an inclusive lower bound for the range.
+ bytes start_value_closed = 1;
+
+ // Used when giving an exclusive lower bound for the range.
+ bytes start_value_open = 2;
+ }
+
+ // The value at which to end the range.
+ // If neither field is set, interpreted as the infinite string, exclusive.
+ oneof end_value {
+ // Used when giving an inclusive upper bound for the range.
+ bytes end_value_closed = 3;
+
+ // Used when giving an exclusive upper bound for the range.
+ bytes end_value_open = 4;
+ }
+}
+
+// Takes a row as input and produces an alternate view of the row based on
+// specified rules. For example, a RowFilter might trim down a row to include
+// just the cells from columns matching a given regular expression, or might
+// return all the cells of a row but not their values. More complicated filters
+// can be composed out of these components to express requests such as, "within
+// every column of a particular family, give just the two most recent cells
+// which are older than timestamp X."
+//
+// There are two broad categories of RowFilters (true filters and transformers),
+// as well as two ways to compose simple filters into more complex ones
+// (chains and interleaves). They work as follows:
+//
+// * True filters alter the input row by excluding some of its cells wholesale
+// from the output row. An example of a true filter is the `value_regex_filter`,
+// which excludes cells whose values don't match the specified pattern. All
+// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
+// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
+// important point to keep in mind is that `RE2(.)` is equivalent by default to
+// `RE2([^\n])`, meaning that it does not match newlines. When attempting to
+// match an arbitrary byte, you should therefore use the escape sequence `\C`,
+// which may need to be further escaped as `\\C` in your client language.
+//
+// * Transformers alter the input row by changing the values of some of its
+// cells in the output, without excluding them completely. Currently, the only
+// supported transformer is the `strip_value_transformer`, which replaces every
+// cell's value with the empty string.
+//
+// * Chains and interleaves are described in more detail in the
+// RowFilter.Chain and RowFilter.Interleave documentation.
+//
+// The total serialized size of a RowFilter message must not
+// exceed 4096 bytes, and RowFilters may not be nested within each other
+// (in Chains or Interleaves) to a depth of more than 20.
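+//
+// For illustration, a filter which keeps only the most recent cell of every
+// column in one family and strips its value could be composed as the
+// following chain, shown in protobuf text format (the family name is a
+// placeholder):
+//
+//   chain {
+//     filters { family_name_regex_filter: "cf1" }
+//     filters { cells_per_column_limit_filter: 1 }
+//     filters { strip_value_transformer: true }
+//   }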
+message RowFilter {
+ // A RowFilter which sends rows through several RowFilters in sequence.
+ message Chain {
+ // The elements of "filters" are chained together to process the input row:
+ // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
+ // The full chain is executed atomically.
+ repeated RowFilter filters = 1;
+ }
+
+ // A RowFilter which sends each row to each of several component
+ // RowFilters and interleaves the results.
+ message Interleave {
+ // The elements of "filters" all process a copy of the input row, and the
+ // results are pooled, sorted, and combined into a single output row.
+ // If multiple cells are produced with the same column and timestamp,
+ // they will all appear in the output row in an unspecified mutual order.
+ // Consider the following example, with three filters:
+ //
+ // input row
+ // |
+ // -----------------------------------------------------
+ // | | |
+ // f(0) f(1) f(2)
+ // | | |
+ // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a
+ // 2: foo,blah,11,z far,blah,5,x far,blah,5,x
+ // | | |
+ // -----------------------------------------------------
+ // |
+ // 1: foo,bar,10,z // could have switched with #2
+ // 2: foo,bar,10,x // could have switched with #1
+ // 3: foo,blah,11,z
+ // 4: far,bar,7,a
+ // 5: far,blah,5,x // identical to #6
+ // 6: far,blah,5,x // identical to #5
+ //
+ // All interleaved filters are executed atomically.
+ repeated RowFilter filters = 1;
+ }
+
+ // A RowFilter which evaluates one of two possible RowFilters, depending on
+ // whether or not a predicate RowFilter outputs any cells from the input row.
+ //
+ // IMPORTANT NOTE: The predicate filter does not execute atomically with the
+ // true and false filters, which may lead to inconsistent or unexpected
+ // results. Additionally, Condition filters have poor performance, especially
+ // when filters are set for the false condition.
+ message Condition {
+ // If `predicate_filter` outputs any cells, then `true_filter` will be
+ // evaluated on the input row. Otherwise, `false_filter` will be evaluated.
+ RowFilter predicate_filter = 1;
+
+ // The filter to apply to the input row if `predicate_filter` returns any
+ // results. If not provided, no results will be returned in the true case.
+ RowFilter true_filter = 2;
+
+ // The filter to apply to the input row if `predicate_filter` does not
+ // return any results. If not provided, no results will be returned in the
+ // false case.
+ RowFilter false_filter = 3;
+ }
+
+ // Which of the possible RowFilter types to apply. If none are set, this
+ // RowFilter returns all cells in the input row.
+ oneof filter {
+ // Applies several RowFilters to the data in sequence, progressively
+ // narrowing the results.
+ Chain chain = 1;
+
+ // Applies several RowFilters to the data in parallel and combines the
+ // results.
+ Interleave interleave = 2;
+
+ // Applies one of two possible RowFilters to the data based on the output of
+ // a predicate RowFilter.
+ Condition condition = 3;
+
+ // ADVANCED USE ONLY.
+ // Hook for introspection into the RowFilter. Outputs all cells directly to
+ // the output of the read rather than to any parent filter. Consider the
+ // following example:
+ //
+ // Chain(
+ // FamilyRegex("A"),
+ // Interleave(
+ // All(),
+ // Chain(Label("foo"), Sink())
+ // ),
+ // QualifierRegex("B")
+ // )
+ //
+ // A,A,1,w
+ // A,B,2,x
+ // B,B,4,z
+ // |
+ // FamilyRegex("A")
+ // |
+ // A,A,1,w
+ // A,B,2,x
+ // |
+ // +------------+-------------+
+ // | |
+ // All() Label(foo)
+ // | |
+ // A,A,1,w A,A,1,w,labels:[foo]
+ // A,B,2,x A,B,2,x,labels:[foo]
+ // | |
+ // | Sink() --------------+
+ // | | |
+ // +------------+ x------+ A,A,1,w,labels:[foo]
+ // | A,B,2,x,labels:[foo]
+ // A,A,1,w |
+ // A,B,2,x |
+ // | |
+ // QualifierRegex("B") |
+ // | |
+ // A,B,2,x |
+ // | |
+ // +--------------------------------+
+ // |
+ // A,A,1,w,labels:[foo]
+ // A,B,2,x,labels:[foo] // could be switched
+ // A,B,2,x // could be switched
+ //
+ // Despite being excluded by the qualifier filter, a copy of every cell
+ // that reaches the sink is present in the final result.
+ //
+ // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave],
+ // duplicate cells are possible, and appear in an unspecified mutual order.
+ // In this case we have a duplicate with column "A:B" and timestamp 2,
+ // because one copy passed through the all filter while the other was
+ // passed through the label and sink. Note that one copy has label "foo",
+ // while the other does not.
+ //
+ // Cannot be used within the `predicate_filter`, `true_filter`, or
+ // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition].
+ bool sink = 16;
+
+ // Matches all cells, regardless of input. Functionally equivalent to
+ // leaving `filter` unset, but included for completeness.
+ bool pass_all_filter = 17;
+
+ // Does not match any cells, regardless of input. Useful for temporarily
+ // disabling just part of a filter.
+ bool block_all_filter = 18;
+
+ // Matches only cells from rows whose keys satisfy the given RE2 regex. In
+ // other words, passes through the entire row when the key matches, and
+ // otherwise produces an empty row.
+ // Note that, since row keys can contain arbitrary bytes, the `\C` escape
+ // sequence must be used if a true wildcard is desired. The `.` character
+ // will not match the new line character `\n`, which may be present in a
+ // binary key.
+ bytes row_key_regex_filter = 4;
+
+ // Matches all cells from a row with probability p, and matches no cells
+ // from the row with probability 1-p.
+ double row_sample_filter = 14;
+
+ // Matches only cells from columns whose families satisfy the given RE2
+ // regex. For technical reasons, the regex must not contain the `:`
+ // character, even if it is not being used as a literal.
+ // Note that, since column families cannot contain the new line character
+ // `\n`, it is sufficient to use `.` as a full wildcard when matching
+ // column family names.
+ string family_name_regex_filter = 5;
+
+ // Matches only cells from columns whose qualifiers satisfy the given RE2
+ // regex.
+ // Note that, since column qualifiers can contain arbitrary bytes, the `\C`
+ // escape sequence must be used if a true wildcard is desired. The `.`
+ // character will not match the new line character `\n`, which may be
+ // present in a binary qualifier.
+ bytes column_qualifier_regex_filter = 6;
+
+ // Matches only cells from columns within the given range.
+ ColumnRange column_range_filter = 7;
+
+ // Matches only cells with timestamps within the given range.
+ TimestampRange timestamp_range_filter = 8;
+
+ // Matches only cells with values that satisfy the given regular expression.
+ // Note that, since cell values can contain arbitrary bytes, the `\C` escape
+ // sequence must be used if a true wildcard is desired. The `.` character
+ // will not match the new line character `\n`, which may be present in a
+ // binary value.
+ bytes value_regex_filter = 9;
+
+ // Matches only cells with values that fall within the given range.
+ ValueRange value_range_filter = 15;
+
+ // Skips the first N cells of each row, matching all subsequent cells.
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_row_offset_filter = 10;
+
+ // Matches only the first N cells of each row.
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_row_limit_filter = 11;
+
+ // Matches only the most recent N cells within each column. For example,
+ // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9,
+ // skip all earlier cells in `foo:bar`, and then begin matching again in
+ // column `foo:bar2`.
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_column_limit_filter = 12;
+
+ // Replaces each cell's value with the empty string.
+ bool strip_value_transformer = 13;
+
+ // Applies the given label to all cells in the output row. This allows
+ // the client to determine which results were produced from which part of
+ // the filter.
+ //
+ // Values must be at most 15 characters in length, and match the RE2
+ // pattern `[a-z0-9\\-]+`
+ //
+ // Due to a technical limitation, it is not currently possible to apply
+ // multiple labels to a cell. As a result, a Chain may have no more than
+    // one sub-filter which contains an `apply_label_transformer`. It is okay for
+ // an Interleave to contain multiple `apply_label_transformers`, as they
+ // will be applied to separate copies of the input. This may be relaxed in
+ // the future.
+ string apply_label_transformer = 19;
+ }
+}
+
+// Specifies a particular change to be made to the contents of a row.
+message Mutation {
+ // A Mutation which sets the value of the specified cell.
+ message SetCell {
+ // The name of the family into which new data should be written.
+ // Must match `[-_.a-zA-Z0-9]+`
+ string family_name = 1;
+
+ // The qualifier of the column into which new data should be written.
+ // Can be any byte string, including the empty string.
+ bytes column_qualifier = 2;
+
+ // The timestamp of the cell into which new data should be written.
+ // Use -1 for current Bigtable server time.
+ // Otherwise, the client should set this value itself, noting that the
+ // default value is a timestamp of zero if the field is left unspecified.
+ // Values must match the granularity of the table (e.g. micros, millis).
+ int64 timestamp_micros = 3;
+
+ // The value to be written into the specified cell.
+ bytes value = 4;
+ }
+
+ // A Mutation which deletes cells from the specified column, optionally
+ // restricting the deletions to a given timestamp range.
+ message DeleteFromColumn {
+ // The name of the family from which cells should be deleted.
+ // Must match `[-_.a-zA-Z0-9]+`
+ string family_name = 1;
+
+ // The qualifier of the column from which cells should be deleted.
+ // Can be any byte string, including the empty string.
+ bytes column_qualifier = 2;
+
+ // The range of timestamps within which cells should be deleted.
+ TimestampRange time_range = 3;
+ }
+
+ // A Mutation which deletes all cells from the specified column family.
+ message DeleteFromFamily {
+ // The name of the family from which cells should be deleted.
+ // Must match `[-_.a-zA-Z0-9]+`
+ string family_name = 1;
+ }
+
+ // A Mutation which deletes all cells from the containing row.
+ message DeleteFromRow {
+
+ }
+
+ // Which of the possible Mutation types to apply.
+ oneof mutation {
+ // Set a cell's value.
+ SetCell set_cell = 1;
+
+ // Deletes cells from a column.
+ DeleteFromColumn delete_from_column = 2;
+
+ // Deletes cells from a column family.
+ DeleteFromFamily delete_from_family = 3;
+
+ // Deletes cells from the entire row.
+ DeleteFromRow delete_from_row = 4;
+ }
+}
+
+// Specifies an atomic read/modify/write operation on the latest value of the
+// specified column.
+message ReadModifyWriteRule {
+ // The name of the family to which the read/modify/write should be applied.
+ // Must match `[-_.a-zA-Z0-9]+`
+ string family_name = 1;
+
+ // The qualifier of the column to which the read/modify/write should be
+ // applied.
+ // Can be any byte string, including the empty string.
+ bytes column_qualifier = 2;
+
+ // The rule used to determine the column's new latest value from its current
+ // latest value.
+ oneof rule {
+ // Rule specifying that `append_value` be appended to the existing value.
+ // If the targeted cell is unset, it will be treated as containing the
+ // empty string.
+ bytes append_value = 3;
+
+ // Rule specifying that `increment_amount` be added to the existing value.
+ // If the targeted cell is unset, it will be treated as containing a zero.
+ // Otherwise, the targeted cell must contain an 8-byte value (interpreted
+ // as a 64-bit big-endian signed integer), or the entire request will fail.
+ int64 increment_amount = 4;
+ }
+}
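
The `increment_amount` rule only applies cleanly when the existing cell holds exactly eight bytes. As a hedged illustration (plain Python, not part of the Bigtable API), the 64-bit big-endian interpretation described above looks like this:

    import struct

    # Existing cell contents: 41 encoded as an 8-byte big-endian signed integer.
    current = struct.pack(">q", 41)
    increment_amount = 1

    # What the server conceptually computes when applying the rule.
    new_value = struct.pack(">q", struct.unpack(">q", current)[0] + increment_amount)
    assert struct.unpack(">q", new_value)[0] == 42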
diff --git a/third_party/googleapis/google/bytestream/bytestream.proto b/third_party/googleapis/google/bytestream/bytestream.proto
new file mode 100644
index 0000000000..85e386fc2b
--- /dev/null
+++ b/third_party/googleapis/google/bytestream/bytestream.proto
@@ -0,0 +1,181 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bytestream;
+
+import "google/api/annotations.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bytestream;bytestream";
+option java_outer_classname = "ByteStreamProto";
+option java_package = "com.google.bytestream";
+
+
+// #### Introduction
+//
+// The Byte Stream API enables a client to read and write a stream of bytes to
+// and from a resource. Resources have names, and these names are supplied in
+// the API calls below to identify the resource that is being read from or
+// written to.
+//
+// All implementations of the Byte Stream API export the interface defined here:
+//
+// * `Read()`: Reads the contents of a resource.
+//
+// * `Write()`: Writes the contents of a resource. The client can call `Write()`
+// multiple times with the same resource and can check the status of the write
+// by calling `QueryWriteStatus()`.
+//
+// #### Service parameters and metadata
+//
+// The ByteStream API provides no direct way to access/modify any metadata
+// associated with the resource.
+//
+// #### Errors
+//
+// The errors returned by the service are in the Google canonical error space.
+service ByteStream {
+ // `Read()` is used to retrieve the contents of a resource as a sequence
+ // of bytes. The bytes are returned in a sequence of responses, and the
+ // responses are delivered as the results of a server-side streaming RPC.
+ rpc Read(ReadRequest) returns (stream ReadResponse);
+
+ // `Write()` is used to send the contents of a resource as a sequence of
+ // bytes. The bytes are sent in a sequence of request protos of a client-side
+ // streaming RPC.
+ //
+ // A `Write()` action is resumable. If there is an error or the connection is
+ // broken during the `Write()`, the client should check the status of the
+ // `Write()` by calling `QueryWriteStatus()` and continue writing from the
+ // returned `committed_size`. This may be less than the amount of data the
+ // client previously sent.
+ //
+ // Calling `Write()` on a resource name that was previously written and
+ // finalized could cause an error, depending on whether the underlying service
+  // allows overwriting of previously written resources.
+ //
+ // When the client closes the request channel, the service will respond with
+ // a `WriteResponse`. The service will not view the resource as `complete`
+ // until the client has sent a `WriteRequest` with `finish_write` set to
+ // `true`. Sending any requests on a stream after sending a request with
+ // `finish_write` set to `true` will cause an error. The client **should**
+ // check the `WriteResponse` it receives to determine how much data the
+ // service was able to commit and whether the service views the resource as
+ // `complete` or not.
+ rpc Write(stream WriteRequest) returns (WriteResponse);
+
+ // `QueryWriteStatus()` is used to find the `committed_size` for a resource
+ // that is being written, which can then be used as the `write_offset` for
+ // the next `Write()` call.
+ //
+ // If the resource does not exist (i.e., the resource has been deleted, or the
+ // first `Write()` has not yet reached the service), this method returns the
+ // error `NOT_FOUND`.
+ //
+ // The client **may** call `QueryWriteStatus()` at any time to determine how
+ // much data has been processed for this resource. This is useful if the
+ // client is buffering data and needs to know which data can be safely
+ // evicted. For any sequence of `QueryWriteStatus()` calls for a given
+ // resource name, the sequence of returned `committed_size` values will be
+ // non-decreasing.
+ rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse);
+}
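
A minimal client-side sketch of the resumable `Write()` flow described above, in Python. It assumes `stub` is a protoc-generated `ByteStream` gRPC stub and that `WriteRequest`/`QueryWriteStatusRequest` are the messages defined later in this file; the chunk size is illustrative, and error handling (including the already-complete case) is omitted:

    CHUNK = 1024 * 1024  # illustrative chunk size

    def write_resource(stub, resource_name, data):
        # Resume from whatever the service has already committed. For a brand-new
        # resource QueryWriteStatus() returns NOT_FOUND; a real client would catch
        # that and start from offset 0 (omitted here for brevity).
        status = stub.QueryWriteStatus(
            QueryWriteStatusRequest(resource_name=resource_name))
        start = status.committed_size

        def requests():
            pos = start
            while pos < len(data):
                chunk = data[pos:pos + CHUNK]
                yield WriteRequest(
                    # resource_name is required on the first request of the stream.
                    resource_name=resource_name if pos == start else "",
                    # Must equal the first write_offset plus all bytes sent so far.
                    write_offset=pos,
                    data=chunk,
                    finish_write=(pos + len(chunk) >= len(data)))
                pos += len(chunk)

        response = stub.Write(requests())
        return response.committed_size  # compare against len(data) before trusting it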
+
+// Request object for ByteStream.Read.
+message ReadRequest {
+ // The name of the resource to read.
+ string resource_name = 1;
+
+ // The offset for the first byte to return in the read, relative to the start
+ // of the resource.
+ //
+ // A `read_offset` that is negative or greater than the size of the resource
+ // will cause an `OUT_OF_RANGE` error.
+ int64 read_offset = 2;
+
+ // The maximum number of `data` bytes the server is allowed to return in the
+ // sum of all `ReadResponse` messages. A `read_limit` of zero indicates that
+ // there is no limit, and a negative `read_limit` will cause an error.
+ //
+ // If the stream returns fewer bytes than allowed by the `read_limit` and no
+ // error occurred, the stream includes all data from the `read_offset` to the
+ // end of the resource.
+ int64 read_limit = 3;
+}
+
+// Response object for ByteStream.Read.
+message ReadResponse {
+ // A portion of the data for the resource. The service **may** leave `data`
+ // empty for any given `ReadResponse`. This enables the service to inform the
+ // client that the request is still live while it is running an operation to
+ // generate more data.
+ bytes data = 10;
+}
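
For completeness, a matching sketch of the `Read()` side, under the same assumptions about the generated stub and messages:

    def read_resource(stub, resource_name, offset=0, limit=0):
        # read_limit == 0 means "no limit"; responses may carry empty keep-alive data.
        data = bytearray()
        for response in stub.Read(ReadRequest(resource_name=resource_name,
                                              read_offset=offset,
                                              read_limit=limit)):
            data.extend(response.data)
        return bytes(data)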
+
+// Request object for ByteStream.Write.
+message WriteRequest {
+ // The name of the resource to write. This **must** be set on the first
+ // `WriteRequest` of each `Write()` action. If it is set on subsequent calls,
+ // it **must** match the value of the first request.
+ string resource_name = 1;
+
+ // The offset from the beginning of the resource at which the data should be
+ // written. It is required on all `WriteRequest`s.
+ //
+ // In the first `WriteRequest` of a `Write()` action, it indicates
+ // the initial offset for the `Write()` call. The value **must** be equal to
+ // the `committed_size` that a call to `QueryWriteStatus()` would return.
+ //
+ // On subsequent calls, this value **must** be set and **must** be equal to
+ // the sum of the first `write_offset` and the sizes of all `data` bundles
+ // sent previously on this stream.
+ //
+ // An incorrect value will cause an error.
+ int64 write_offset = 2;
+
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteRequest`s subsequent to one in which `finish_write` is `true` will
+ // cause an error.
+ bool finish_write = 3;
+
+ // A portion of the data for the resource. The client **may** leave `data`
+ // empty for any given `WriteRequest`. This enables the client to inform the
+ // service that the request is still live while it is running an operation to
+ // generate more data.
+ bytes data = 10;
+}
+
+// Response object for ByteStream.Write.
+message WriteResponse {
+ // The number of bytes that have been processed for the given resource.
+ int64 committed_size = 1;
+}
+
+// Request object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusRequest {
+ // The name of the resource whose write status is being requested.
+ string resource_name = 1;
+}
+
+// Response object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusResponse {
+ // The number of bytes that have been processed for the given resource.
+ int64 committed_size = 1;
+
+ // `complete` is `true` only if the client has sent a `WriteRequest` with
+ // `finish_write` set to true, and the server has processed that request.
+ bool complete = 2;
+}
diff --git a/third_party/googleapis/google/cloud/audit/audit_log.proto b/third_party/googleapis/google/cloud/audit/audit_log.proto
new file mode 100644
index 0000000000..eab66a1f9c
--- /dev/null
+++ b/third_party/googleapis/google/cloud/audit/audit_log.proto
@@ -0,0 +1,128 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.audit;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/struct.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/audit;audit";
+option java_multiple_files = true;
+option java_outer_classname = "AuditLogProto";
+option java_package = "com.google.cloud.audit";
+
+
+// Common audit log format for Google Cloud Platform API operations.
+message AuditLog {
+ // The name of the API service performing the operation. For example,
+ // `"datastore.googleapis.com"`.
+ string service_name = 7;
+
+ // The name of the service method or operation.
+ // For API calls, this should be the name of the API method.
+ // For example,
+ //
+ // "google.datastore.v1.Datastore.RunQuery"
+ // "google.logging.v1.LoggingService.DeleteLog"
+ string method_name = 8;
+
+ // The resource or collection that is the target of the operation.
+ // The name is a scheme-less URI, not including the API service name.
+ // For example:
+ //
+ // "shelves/SHELF_ID/books"
+ // "shelves/SHELF_ID/books/BOOK_ID"
+ string resource_name = 11;
+
+ // The number of items returned from a List or Query API method,
+ // if applicable.
+ int64 num_response_items = 12;
+
+ // The status of the overall operation.
+ google.rpc.Status status = 2;
+
+ // Authentication information.
+ AuthenticationInfo authentication_info = 3;
+
+ // Authorization information. If there are multiple
+ // resources or permissions involved, then there is
+ // one AuthorizationInfo element for each {resource, permission} tuple.
+ repeated AuthorizationInfo authorization_info = 9;
+
+ // Metadata about the operation.
+ RequestMetadata request_metadata = 4;
+
+ // The operation request. This may not include all request parameters,
+ // such as those that are too large, privacy-sensitive, or duplicated
+ // elsewhere in the log record.
+ // It should never include user-generated data, such as file contents.
+ // When the JSON object represented here has a proto equivalent, the proto
+ // name will be indicated in the `@type` property.
+ google.protobuf.Struct request = 16;
+
+ // The operation response. This may not include all response elements,
+ // such as those that are too large, privacy-sensitive, or duplicated
+ // elsewhere in the log record.
+ // It should never include user-generated data, such as file contents.
+ // When the JSON object represented here has a proto equivalent, the proto
+ // name will be indicated in the `@type` property.
+ google.protobuf.Struct response = 17;
+
+ // Other service-specific data about the request, response, and other
+ // activities.
+ google.protobuf.Any service_data = 15;
+}
+
+// Authentication information for the operation.
+message AuthenticationInfo {
+ // The email address of the authenticated user making the request.
+ string principal_email = 1;
+}
+
+// Authorization information for the operation.
+message AuthorizationInfo {
+ // The resource being accessed, as a REST-style string. For example:
+ //
+  //     bigquery.googleapis.com/projects/PROJECTID/datasets/DATASETID
+ string resource = 1;
+
+ // The required IAM permission.
+ string permission = 2;
+
+ // Whether or not authorization for `resource` and `permission`
+ // was granted.
+ bool granted = 3;
+}
+
+// Metadata about the request.
+message RequestMetadata {
+ // The IP address of the caller.
+ string caller_ip = 1;
+
+ // The user agent of the caller.
+ // This information is not authenticated and should be treated accordingly.
+ // For example:
+ //
+ // + `google-api-python-client/1.4.0`:
+ // The request was made by the Google API client for Python.
+ // + `Cloud SDK Command Line Tool apitools-client/1.0 gcloud/0.9.62`:
+ // The request was made by the Google Cloud SDK CLI (gcloud).
+  // +   `AppEngine-Google; (+http://code.google.com/appengine; appid: s~my-project)`:
+ // The request was made from the `my-project` App Engine app.
+ string caller_supplied_user_agent = 2;
+}
diff --git a/third_party/googleapis/google/cloud/billing/v1/cloud_billing.proto b/third_party/googleapis/google/cloud/billing/v1/cloud_billing.proto
new file mode 100644
index 0000000000..9ce199863d
--- /dev/null
+++ b/third_party/googleapis/google/cloud/billing/v1/cloud_billing.proto
@@ -0,0 +1,214 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.billing.v1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/billing/v1;billing";
+option java_multiple_files = true;
+option java_outer_classname = "CloudBillingProto";
+option java_package = "com.google.cloud.billing.v1";
+
+
+// Retrieves Google Cloud Console billing accounts and associates them with
+// projects.
+service CloudBilling {
+ // Gets information about a billing account. The current authenticated user
+ // must be an [owner of the billing
+ // account](https://support.google.com/cloud/answer/4430947).
+ rpc GetBillingAccount(GetBillingAccountRequest) returns (BillingAccount) {
+ option (google.api.http) = { get: "/v1/{name=billingAccounts/*}" };
+ }
+
+ // Lists the billing accounts that the current authenticated user
+ // [owns](https://support.google.com/cloud/answer/4430947).
+ rpc ListBillingAccounts(ListBillingAccountsRequest) returns (ListBillingAccountsResponse) {
+ option (google.api.http) = { get: "/v1/billingAccounts" };
+ }
+
+ // Lists the projects associated with a billing account. The current
+ // authenticated user must be an [owner of the billing
+ // account](https://support.google.com/cloud/answer/4430947).
+ rpc ListProjectBillingInfo(ListProjectBillingInfoRequest) returns (ListProjectBillingInfoResponse) {
+ option (google.api.http) = { get: "/v1/{name=billingAccounts/*}/projects" };
+ }
+
+ // Gets the billing information for a project. The current authenticated user
+ // must have [permission to view the
+  // project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo).
+ rpc GetProjectBillingInfo(GetProjectBillingInfoRequest) returns (ProjectBillingInfo) {
+ option (google.api.http) = { get: "/v1/{name=projects/*}/billingInfo" };
+ }
+
+ // Sets or updates the billing account associated with a project. You specify
+ // the new billing account by setting the `billing_account_name` in the
+ // `ProjectBillingInfo` resource to the resource name of a billing account.
+ // Associating a project with an open billing account enables billing on the
+ // project and allows charges for resource usage. If the project already had a
+ // billing account, this method changes the billing account used for resource
+ // usage charges.
+ //
+ // *Note:* Incurred charges that have not yet been reported in the transaction
+ // history of the Google Cloud Console may be billed to the new billing
+ // account, even if the charge occurred before the new billing account was
+ // assigned to the project.
+ //
+ // The current authenticated user must have ownership privileges for both the
+  // [project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo)
+  // and the [billing
+ // account](https://support.google.com/cloud/answer/4430947).
+ //
+ // You can disable billing on the project by setting the
+ // `billing_account_name` field to empty. This action disassociates the
+ // current billing account from the project. Any billable activity of your
+ // in-use services will stop, and your application could stop functioning as
+ // expected. Any unbilled charges to date will be billed to the previously
+ // associated account. The current authenticated user must be either an owner
+ // of the project or an owner of the billing account for the project.
+ //
+ // Note that associating a project with a *closed* billing account will have
+ // much the same effect as disabling billing on the project: any paid
+ // resources used by the project will be shut down. Thus, unless you wish to
+ // disable billing, you should always call this method with the name of an
+ // *open* billing account.
+ rpc UpdateProjectBillingInfo(UpdateProjectBillingInfoRequest) returns (ProjectBillingInfo) {
+ option (google.api.http) = { put: "/v1/{name=projects/*}/billingInfo" body: "project_billing_info" };
+ }
+}
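
A sketch of the two `UpdateProjectBillingInfo` patterns described above, assuming a protoc-generated `CloudBilling` stub and the request/response messages defined below in this file; the project and billing-account names are the illustrative ones already used in the comments:

    # Associate the project with an open billing account.
    stub.UpdateProjectBillingInfo(UpdateProjectBillingInfoRequest(
        name="projects/tokyo-rain-123",
        project_billing_info=ProjectBillingInfo(
            billing_account_name="billingAccounts/012345-567890-ABCDEF")))

    # Disable billing by clearing billing_account_name.
    stub.UpdateProjectBillingInfo(UpdateProjectBillingInfoRequest(
        name="projects/tokyo-rain-123",
        project_billing_info=ProjectBillingInfo(billing_account_name="")))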
+
+// A billing account in [Google Cloud
+// Console](https://console.cloud.google.com/). You can assign a billing account
+// to one or more projects.
+message BillingAccount {
+ // The resource name of the billing account. The resource name has the form
+ // `billingAccounts/{billing_account_id}`. For example,
+ // `billingAccounts/012345-567890-ABCDEF` would be the resource name for
+ // billing account `012345-567890-ABCDEF`.
+ string name = 1;
+
+ // True if the billing account is open, and will therefore be charged for any
+ // usage on associated projects. False if the billing account is closed, and
+ // therefore projects associated with it will be unable to use paid services.
+ bool open = 2;
+
+ // The display name given to the billing account, such as `My Billing
+ // Account`. This name is displayed in the Google Cloud Console.
+ string display_name = 3;
+}
+
+// Encapsulation of billing information for a Cloud Console project. A project
+// has at most one associated billing account at a time (but a billing account
+// can be assigned to multiple projects).
+message ProjectBillingInfo {
+ // The resource name for the `ProjectBillingInfo`; has the form
+ // `projects/{project_id}/billingInfo`. For example, the resource name for the
+ // billing information for project `tokyo-rain-123` would be
+ // `projects/tokyo-rain-123/billingInfo`. This field is read-only.
+ string name = 1;
+
+ // The ID of the project that this `ProjectBillingInfo` represents, such as
+ // `tokyo-rain-123`. This is a convenience field so that you don't need to
+ // parse the `name` field to obtain a project ID. This field is read-only.
+ string project_id = 2;
+
+ // The resource name of the billing account associated with the project, if
+ // any. For example, `billingAccounts/012345-567890-ABCDEF`.
+ string billing_account_name = 3;
+
+ // True if the project is associated with an open billing account, to which
+ // usage on the project is charged. False if the project is associated with a
+ // closed billing account, or no billing account at all, and therefore cannot
+ // use paid services. This field is read-only.
+ bool billing_enabled = 4;
+}
+
+// Request message for `GetBillingAccount`.
+message GetBillingAccountRequest {
+ // The resource name of the billing account to retrieve. For example,
+ // `billingAccounts/012345-567890-ABCDEF`.
+ string name = 1;
+}
+
+// Request message for `ListBillingAccounts`.
+message ListBillingAccountsRequest {
+ // Requested page size. The maximum page size is 100; this is also the
+ // default.
+ int32 page_size = 1;
+
+ // A token identifying a page of results to return. This should be a
+ // `next_page_token` value returned from a previous `ListBillingAccounts`
+ // call. If unspecified, the first page of results is returned.
+ string page_token = 2;
+}
+
+// Response message for `ListBillingAccounts`.
+message ListBillingAccountsResponse {
+ // A list of billing accounts.
+ repeated BillingAccount billing_accounts = 1;
+
+ // A token to retrieve the next page of results. To retrieve the next page,
+ // call `ListBillingAccounts` again with the `page_token` field set to this
+ // value. This field is empty if there are no more results to retrieve.
+ string next_page_token = 2;
+}
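
The `page_token`/`next_page_token` contract above follows the usual list-pagination loop; a sketch, again assuming a generated stub and the messages from this file:

    def list_all_billing_accounts(stub):
        accounts, token = [], ""
        while True:
            response = stub.ListBillingAccounts(
                ListBillingAccountsRequest(page_size=100, page_token=token))
            accounts.extend(response.billing_accounts)
            token = response.next_page_token
            if not token:  # an empty token means there are no more results
                return accounts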
+
+// Request message for `ListProjectBillingInfo`.
+message ListProjectBillingInfoRequest {
+ // The resource name of the billing account associated with the projects that
+ // you want to list. For example, `billingAccounts/012345-567890-ABCDEF`.
+ string name = 1;
+
+ // Requested page size. The maximum page size is 100; this is also the
+ // default.
+ int32 page_size = 2;
+
+ // A token identifying a page of results to be returned. This should be a
+ // `next_page_token` value returned from a previous `ListProjectBillingInfo`
+ // call. If unspecified, the first page of results is returned.
+ string page_token = 3;
+}
+
+// Response message for `ListProjectBillingInfo`.
+message ListProjectBillingInfoResponse {
+ // A list of `ProjectBillingInfo` resources representing the projects
+ // associated with the billing account.
+ repeated ProjectBillingInfo project_billing_info = 1;
+
+ // A token to retrieve the next page of results. To retrieve the next page,
+ // call `ListProjectBillingInfo` again with the `page_token` field set to this
+ // value. This field is empty if there are no more results to retrieve.
+ string next_page_token = 2;
+}
+
+// Request message for `GetProjectBillingInfo`.
+message GetProjectBillingInfoRequest {
+ // The resource name of the project for which billing information is
+ // retrieved. For example, `projects/tokyo-rain-123`.
+ string name = 1;
+}
+
+// Request message for `UpdateProjectBillingInfo`.
+message UpdateProjectBillingInfoRequest {
+ // The resource name of the project associated with the billing information
+ // that you want to update. For example, `projects/tokyo-rain-123`.
+ string name = 1;
+
+ // The new billing information for the project. Read-only fields are ignored;
+  // thus, you may leave all fields other than `billing_account_name` empty.
+ ProjectBillingInfo project_billing_info = 2;
+}
diff --git a/third_party/googleapis/google/cloud/dataproc/v1/clusters.proto b/third_party/googleapis/google/cloud/dataproc/v1/clusters.proto
new file mode 100644
index 0000000000..fc7f45eadf
--- /dev/null
+++ b/third_party/googleapis/google/cloud/dataproc/v1/clusters.proto
@@ -0,0 +1,444 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.dataproc.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/dataproc/v1/operations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc";
+option java_multiple_files = true;
+option java_outer_classname = "ClustersProto";
+option java_package = "com.google.cloud.dataproc.v1";
+
+
+// The ClusterControllerService provides methods to manage clusters
+// of Google Compute Engine instances.
+service ClusterController {
+ // Creates a cluster in a project.
+ rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters" body: "cluster" };
+ }
+
+ // Updates a cluster in a project.
+ rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" body: "cluster" };
+ }
+
+ // Deletes a cluster in a project.
+ rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" };
+ }
+
+ // Gets the resource representation for a cluster in a project.
+ rpc GetCluster(GetClusterRequest) returns (Cluster) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" };
+ }
+
+ // Lists all regions/{region}/clusters in a project.
+ rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/regions/{region}/clusters" };
+ }
+
+ // Gets cluster diagnostic information.
+ // After the operation completes, the Operation.response field
+ // contains `DiagnoseClusterOutputLocation`.
+ rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" body: "*" };
+ }
+}
+
+// Describes the identifying information, config, and status of
+// a cluster of Google Compute Engine instances.
+message Cluster {
+ // [Required] The Google Cloud Platform project ID that the cluster belongs to.
+ string project_id = 1;
+
+ // [Required] The cluster name. Cluster names within a project must be
+ // unique. Names of deleted clusters can be reused.
+ string cluster_name = 2;
+
+ // [Required] The cluster config. Note that Cloud Dataproc may set
+ // default values, and values may change when clusters are updated.
+ ClusterConfig config = 3;
+
+ // [Output-only] Cluster status.
+ ClusterStatus status = 4;
+
+  // [Output-only] The previous cluster statuses.
+ repeated ClusterStatus status_history = 7;
+
+  // [Output-only] A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+ // generates this value when it creates the cluster.
+ string cluster_uuid = 6;
+}
+
+// The cluster config.
+message ClusterConfig {
+ // [Optional] A Google Cloud Storage staging bucket used for sharing generated
+ // SSH keys and config. If you do not specify a staging bucket, Cloud
+ // Dataproc will determine an appropriate Cloud Storage location (US,
+ // ASIA, or EU) for your cluster's staging bucket according to the Google
+ // Compute Engine zone where your cluster is deployed, and then it will create
+ // and manage this project-level, per-location bucket for you.
+ string config_bucket = 1;
+
+ // [Required] The shared Google Compute Engine config settings for
+ // all instances in a cluster.
+ GceClusterConfig gce_cluster_config = 8;
+
+ // [Optional] The Google Compute Engine config settings for
+ // the master instance in a cluster.
+ InstanceGroupConfig master_config = 9;
+
+ // [Optional] The Google Compute Engine config settings for
+ // worker instances in a cluster.
+ InstanceGroupConfig worker_config = 10;
+
+ // [Optional] The Google Compute Engine config settings for
+ // additional worker instances in a cluster.
+ InstanceGroupConfig secondary_worker_config = 12;
+
+ // [Optional] The config settings for software inside the cluster.
+ SoftwareConfig software_config = 13;
+
+ // [Optional] Commands to execute on each node after config is
+  // completed. By default, executables are run on the master and all worker nodes.
+  // You can test a node's <code>role</code> metadata to run an executable only on
+  // the master node or only on worker nodes, as shown below using `curl` (you can
+  // also use `wget`):
+ //
+ // ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
+ // if [[ "${ROLE}" == 'Master' ]]; then
+ // ... master specific actions ...
+ // else
+ // ... worker specific actions ...
+ // fi
+ repeated NodeInitializationAction initialization_actions = 11;
+}
+
+// Common config settings for resources of Google Compute Engine cluster
+// instances, applicable to all instances in the cluster.
+message GceClusterConfig {
+ // [Required] The zone where the Google Compute Engine cluster will be located.
+ // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`.
+ string zone_uri = 1;
+
+ // [Optional] The Google Compute Engine network to be used for machine
+ // communications. Cannot be specified with subnetwork_uri. If neither
+ // `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+ // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+ // [Using Subnetworks](/compute/docs/subnetworks) for more information).
+ // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`.
+ string network_uri = 2;
+
+ // [Optional] The Google Compute Engine subnetwork to be used for machine
+ // communications. Cannot be specified with network_uri.
+ // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`.
+ string subnetwork_uri = 6;
+
+ // [Optional] If true, all instances in the cluster will only have internal IP
+ // addresses. By default, clusters are not restricted to internal IP addresses,
+ // and will have ephemeral external IP addresses assigned to each instance.
+  // This `internal_ip_only` restriction can only be enabled for subnetwork-enabled
+  // networks, and all off-cluster dependencies must be configured to be
+ // accessible without external IP addresses.
+ bool internal_ip_only = 7;
+
+ // [Optional] The URIs of service account scopes to be included in Google
+ // Compute Engine instances. The following base set of scopes is always
+ // included:
+ //
+ // * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+ // * https://www.googleapis.com/auth/devstorage.read_write
+ // * https://www.googleapis.com/auth/logging.write
+ //
+ // If no scopes are specified, the following defaults are also provided:
+ //
+ // * https://www.googleapis.com/auth/bigquery
+ // * https://www.googleapis.com/auth/bigtable.admin.table
+ // * https://www.googleapis.com/auth/bigtable.data
+ // * https://www.googleapis.com/auth/devstorage.full_control
+ repeated string service_account_scopes = 3;
+
+ // The Google Compute Engine tags to add to all instances (see
+ // [Labeling instances](/compute/docs/label-or-tag-resources#labeling_instances)).
+ repeated string tags = 4;
+
+ // The Google Compute Engine metadata entries to add to all instances (see
+ // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+ map<string, string> metadata = 5;
+}
+
+// [Optional] The config settings for Google Compute Engine resources in
+// an instance group, such as a master or worker group.
+message InstanceGroupConfig {
+ // [Required] The number of VM instances in the instance group.
+ // For master instance groups, must be set to 1.
+ int32 num_instances = 1;
+
+ // [Optional] The list of instance names. Cloud Dataproc derives the names from
+  // `cluster_name`, `num_instances`, and the instance group if not set by the user
+  // (the recommended practice is to let Cloud Dataproc derive the names).
+ repeated string instance_names = 2;
+
+ // [Output-only] The Google Compute Engine image resource used for cluster
+ // instances. Inferred from `SoftwareConfig.image_version`.
+ string image_uri = 3;
+
+ // [Required] The Google Compute Engine machine type used for cluster instances.
+ // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`.
+ string machine_type_uri = 4;
+
+ // [Optional] Disk option config settings.
+ DiskConfig disk_config = 5;
+
+ // [Optional] Specifies that this instance group contains preemptible instances.
+ bool is_preemptible = 6;
+
+ // [Output-only] The config for Google Compute Engine Instance Group
+ // Manager that manages this group.
+ // This is only used for preemptible instance groups.
+ ManagedGroupConfig managed_group_config = 7;
+}
+
+// Specifies the resources used to actively manage an instance group.
+message ManagedGroupConfig {
+ // [Output-only] The name of the Instance Template used for the Managed
+ // Instance Group.
+ string instance_template_name = 1;
+
+ // [Output-only] The name of the Instance Group Manager for this group.
+ string instance_group_manager_name = 2;
+}
+
+// Specifies the config of disk options for a group of VM instances.
+message DiskConfig {
+ // [Optional] Size in GB of the boot disk (default is 500GB).
+ int32 boot_disk_size_gb = 1;
+
+ // [Optional] Number of attached SSDs, from 0 to 4 (default is 0).
+ // If SSDs are not attached, the boot disk is used to store runtime logs and
+ // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
+ // If one or more SSDs are attached, this runtime bulk
+ // data is spread across them, and the boot disk contains only basic
+ // config and installed binaries.
+ int32 num_local_ssds = 2;
+}
+
+// Specifies an executable to run on a fully configured node and a
+// timeout period for executable completion.
+message NodeInitializationAction {
+ // [Required] Google Cloud Storage URI of executable file.
+ string executable_file = 1;
+
+  // [Optional] The amount of time the executable has to complete. The default is
+  // 10 minutes. Cluster creation fails with an explanatory error message (the
+  // name of the executable that caused the error and the exceeded timeout
+  // period) if the executable has not completed by the end of the timeout period.
+ google.protobuf.Duration execution_timeout = 2;
+}
+
+// The status of a cluster and its instances.
+message ClusterStatus {
+ // The cluster state.
+ enum State {
+ // The cluster state is unknown.
+ UNKNOWN = 0;
+
+ // The cluster is being created and set up. It is not ready for use.
+ CREATING = 1;
+
+ // The cluster is currently running and healthy. It is ready for use.
+ RUNNING = 2;
+
+ // The cluster encountered an error. It is not ready for use.
+ ERROR = 3;
+
+ // The cluster is being deleted. It cannot be used.
+ DELETING = 4;
+
+ // The cluster is being updated. It continues to accept and process jobs.
+ UPDATING = 5;
+ }
+
+ // [Output-only] The cluster's state.
+ State state = 1;
+
+ // [Output-only] Optional details of cluster's state.
+ string detail = 2;
+
+ // [Output-only] Time when this state was entered.
+ google.protobuf.Timestamp state_start_time = 3;
+}
+
+// Specifies the selection and config of software inside the cluster.
+message SoftwareConfig {
+ // [Optional] The version of software inside the cluster. It must match the
+ // regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the
+ // latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)).
+ string image_version = 1;
+
+ // [Optional] The properties to set on daemon config files.
+ //
+ // Property keys are specified in `prefix:property` format, such as
+ // `core:fs.defaultFS`. The following are supported prefixes
+ // and their mappings:
+ //
+ // * core: `core-site.xml`
+ // * hdfs: `hdfs-site.xml`
+ // * mapred: `mapred-site.xml`
+ // * yarn: `yarn-site.xml`
+ // * hive: `hive-site.xml`
+ // * pig: `pig.properties`
+ // * spark: `spark-defaults.conf`
+ map<string, string> properties = 2;
+}
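
A sketch of the `prefix:property` convention described above, using the `SoftwareConfig` message from this file (assumed to come from the generated module; the concrete values are illustrative only):

    software_config = SoftwareConfig(
        image_version="1.0",  # illustrative; omit to default to the latest version
        properties={
            "core:fs.defaultFS": "hdfs://example-cluster-m",  # -> core-site.xml
            "spark:spark.executor.memory": "4g",              # -> spark-defaults.conf
        })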
+
+// A request to create a cluster.
+message CreateClusterRequest {
+ // [Required] The ID of the Google Cloud Platform project that the cluster
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The cluster to create.
+ Cluster cluster = 2;
+}
+
+// A request to update a cluster.
+message UpdateClusterRequest {
+ // [Required] The ID of the Google Cloud Platform project the
+ // cluster belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 5;
+
+ // [Required] The cluster name.
+ string cluster_name = 2;
+
+ // [Required] The changes to the cluster.
+ Cluster cluster = 3;
+
+ // [Required] Specifies the path, relative to <code>Cluster</code>, of
+ // the field to update. For example, to change the number of workers
+ // in a cluster to 5, the <code>update_mask</code> parameter would be
+ // specified as <code>config.worker_config.num_instances</code>,
+ // and the `PATCH` request body would specify the new value, as follows:
+ //
+ // {
+ // "config":{
+ // "workerConfig":{
+ // "numInstances":"5"
+ // }
+ // }
+ // }
+ // Similarly, to change the number of preemptible workers in a cluster to 5, the
+ // <code>update_mask</code> parameter would be <code>config.secondary_worker_config.num_instances</code>,
+ // and the `PATCH` request body would be set as follows:
+ //
+ // {
+ // "config":{
+ // "secondaryWorkerConfig":{
+ // "numInstances":"5"
+ // }
+ // }
+ // }
+ // <strong>Note:</strong> Currently, <code>config.worker_config.num_instances</code>
+ // and <code>config.secondary_worker_config.num_instances</code> are the only
+ // fields that can be updated.
+ google.protobuf.FieldMask update_mask = 4;
+}
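
The same worker-resize example shown in JSON above, sketched as a Python request. A generated `ClusterController` stub and the messages from this file are assumed; the project, region, and cluster names are placeholders:

    from google.protobuf import field_mask_pb2

    operation = stub.UpdateCluster(UpdateClusterRequest(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        cluster=Cluster(config=ClusterConfig(
            worker_config=InstanceGroupConfig(num_instances=5))),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"])))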
+
+// A request to delete a cluster.
+message DeleteClusterRequest {
+ // [Required] The ID of the Google Cloud Platform project that the cluster
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The cluster name.
+ string cluster_name = 2;
+}
+
+// Request to get the resource representation for a cluster in a project.
+message GetClusterRequest {
+ // [Required] The ID of the Google Cloud Platform project that the cluster
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The cluster name.
+ string cluster_name = 2;
+}
+
+// A request to list the clusters in a project.
+message ListClustersRequest {
+ // [Required] The ID of the Google Cloud Platform project that the cluster
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 4;
+
+ // [Optional] The standard List page size.
+ int32 page_size = 2;
+
+ // [Optional] The standard List page token.
+ string page_token = 3;
+}
+
+// The list of all clusters in a project.
+message ListClustersResponse {
+ // [Output-only] The clusters in the project.
+ repeated Cluster clusters = 1;
+
+ // [Output-only] This token is included in the response if there are more
+ // results to fetch. To fetch additional results, provide this value as the
+ // `page_token` in a subsequent <code>ListClustersRequest</code>.
+ string next_page_token = 2;
+}
+
+// A request to collect cluster diagnostic information.
+message DiagnoseClusterRequest {
+ // [Required] The ID of the Google Cloud Platform project that the cluster
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The cluster name.
+ string cluster_name = 2;
+}
+
+// The location of diagnostic output.
+message DiagnoseClusterResults {
+ // [Output-only] The Google Cloud Storage URI of the diagnostic output.
+ // The output report is a plain text file with a summary of collected
+ // diagnostics.
+ string output_uri = 1;
+}
diff --git a/third_party/googleapis/google/cloud/dataproc/v1/jobs.proto b/third_party/googleapis/google/cloud/dataproc/v1/jobs.proto
new file mode 100644
index 0000000000..854ce9b972
--- /dev/null
+++ b/third_party/googleapis/google/cloud/dataproc/v1/jobs.proto
@@ -0,0 +1,573 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.dataproc.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc";
+option java_multiple_files = true;
+option java_outer_classname = "JobsProto";
+option java_package = "com.google.cloud.dataproc.v1";
+
+
+// The JobController provides methods to manage jobs.
+service JobController {
+ // Submits a job to a cluster.
+ rpc SubmitJob(SubmitJobRequest) returns (Job) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/jobs:submit" body: "*" };
+ }
+
+ // Gets the resource representation for a job in a project.
+ rpc GetJob(GetJobRequest) returns (Job) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" };
+ }
+
+ // Lists regions/{region}/jobs in a project.
+ rpc ListJobs(ListJobsRequest) returns (ListJobsResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/regions/{region}/jobs" };
+ }
+
+ // Starts a job cancellation request. To access the job resource
+ // after cancellation, call
+ // [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or
+ // [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get).
+ rpc CancelJob(CancelJobRequest) returns (Job) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel" body: "*" };
+ }
+
+ // Deletes the job from the project. If the job is active, the delete fails,
+ // and the response returns `FAILED_PRECONDITION`.
+ rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" };
+ }
+}
+
+// The runtime logging config of the job.
+message LoggingConfig {
+ // The Log4j level for job execution. When running an
+ // [Apache Hive](http://hive.apache.org/) job, Cloud
+ // Dataproc configures the Hive client to an equivalent verbosity level.
+ enum Level {
+ // Level is unspecified. Use default level for log4j.
+ LEVEL_UNSPECIFIED = 0;
+
+ // Use ALL level for log4j.
+ ALL = 1;
+
+ // Use TRACE level for log4j.
+ TRACE = 2;
+
+ // Use DEBUG level for log4j.
+ DEBUG = 3;
+
+ // Use INFO level for log4j.
+ INFO = 4;
+
+ // Use WARN level for log4j.
+ WARN = 5;
+
+ // Use ERROR level for log4j.
+ ERROR = 6;
+
+ // Use FATAL level for log4j.
+ FATAL = 7;
+
+ // Turn off log4j.
+ OFF = 8;
+ }
+
+  // The per-package log levels for the driver. This may include the
+  // "root" package name to configure the root logger.
+ // Examples:
+ // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ map<string, Level> driver_log_levels = 2;
+}
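
A sketch of the per-package map described above, assuming the generated `LoggingConfig` message (nested enum values are exposed as attributes of the containing message class in generated Python code):

    logging_config = LoggingConfig(driver_log_levels={
        "root": LoggingConfig.INFO,
        "org.apache": LoggingConfig.DEBUG,
        "com.google": LoggingConfig.FATAL,
    })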
+
+// A Cloud Dataproc job for running
+// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+message HadoopJob {
+ // [Required] Indicates the location of the driver's main class. Specify
+ // either the jar file that contains the main class or the main class name.
+ // To specify both, add the jar file to `jar_file_uris`, and then specify
+ // the main class name in this property.
+ oneof driver {
+ // The HCFS URI of the jar file containing the main class.
+ // Examples:
+ // 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+ // 'hdfs:/tmp/test-samples/custom-wordcount.jar'
+ // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ string main_jar_file_uri = 1;
+
+ // The name of the driver's main class. The jar file containing the class
+ // must be in the default CLASSPATH or specified in `jar_file_uris`.
+ string main_class = 2;
+ }
+
+ // [Optional] The arguments to pass to the driver. Do not
+ // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+ // properties, since a collision may occur that causes an incorrect job
+ // submission.
+ repeated string args = 3;
+
+ // [Optional] Jar file URIs to add to the CLASSPATHs of the
+ // Hadoop driver and tasks.
+ repeated string jar_file_uris = 4;
+
+ // [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+ // to the working directory of Hadoop drivers and distributed tasks. Useful
+ // for naively parallel tasks.
+ repeated string file_uris = 5;
+
+ // [Optional] HCFS URIs of archives to be extracted in the working directory of
+ // Hadoop drivers and tasks. Supported file types:
+ // .jar, .tar, .tar.gz, .tgz, or .zip.
+ repeated string archive_uris = 6;
+
+ // [Optional] A mapping of property names to values, used to configure Hadoop.
+ // Properties that conflict with values set by the Cloud Dataproc API may be
+ // overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+ // classes in user code.
+ map<string, string> properties = 7;
+
+ // [Optional] The runtime log config for job execution.
+ LoggingConfig logging_config = 8;
+}
+
+// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
+// applications on YARN.
+message SparkJob {
+ // [Required] The specification of the main method to call to drive the job.
+ // Specify either the jar file that contains the main class or the main class
+ // name. To pass both a main jar and a main class in that jar, add the jar to
+ // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
+ oneof driver {
+ // The HCFS URI of the jar file that contains the main class.
+ string main_jar_file_uri = 1;
+
+ // The name of the driver's main class. The jar file that contains the class
+ // must be in the default CLASSPATH or specified in `jar_file_uris`.
+ string main_class = 2;
+ }
+
+ // [Optional] The arguments to pass to the driver. Do not include arguments,
+ // such as `--conf`, that can be set as job properties, since a collision may
+ // occur that causes an incorrect job submission.
+ repeated string args = 3;
+
+ // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the
+ // Spark driver and tasks.
+ repeated string jar_file_uris = 4;
+
+ // [Optional] HCFS URIs of files to be copied to the working directory of
+ // Spark drivers and distributed tasks. Useful for naively parallel tasks.
+ repeated string file_uris = 5;
+
+ // [Optional] HCFS URIs of archives to be extracted in the working directory
+ // of Spark drivers and tasks. Supported file types:
+ // .jar, .tar, .tar.gz, .tgz, and .zip.
+ repeated string archive_uris = 6;
+
+ // [Optional] A mapping of property names to values, used to configure Spark.
+ // Properties that conflict with values set by the Cloud Dataproc API may be
+ // overwritten. Can include properties set in
+ // /etc/spark/conf/spark-defaults.conf and classes in user code.
+ map<string, string> properties = 7;
+
+ // [Optional] The runtime log config for job execution.
+ LoggingConfig logging_config = 8;
+}
+
+// A Cloud Dataproc job for running
+// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+// applications on YARN.
+message PySparkJob {
+ // [Required] The HCFS URI of the main Python file to use as the driver. Must
+ // be a .py file.
+ string main_python_file_uri = 1;
+
+ // [Optional] The arguments to pass to the driver. Do not include arguments,
+ // such as `--conf`, that can be set as job properties, since a collision may
+ // occur that causes an incorrect job submission.
+ repeated string args = 2;
+
+ // [Optional] HCFS file URIs of Python files to pass to the PySpark
+ // framework. Supported file types: .py, .egg, and .zip.
+ repeated string python_file_uris = 3;
+
+ // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the
+ // Python driver and tasks.
+ repeated string jar_file_uris = 4;
+
+ // [Optional] HCFS URIs of files to be copied to the working directory of
+ // Python drivers and distributed tasks. Useful for naively parallel tasks.
+ repeated string file_uris = 5;
+
+ // [Optional] HCFS URIs of archives to be extracted in the working directory of
+  // [Optional] HCFS URIs of archives to be extracted in the working directory of
+  // Python drivers and distributed tasks. Supported file types:
+  // .jar, .tar, .tar.gz, .tgz, and .zip.
+
+ // [Optional] A mapping of property names to values, used to configure PySpark.
+ // Properties that conflict with values set by the Cloud Dataproc API may be
+ // overwritten. Can include properties set in
+ // /etc/spark/conf/spark-defaults.conf and classes in user code.
+ map<string, string> properties = 7;
+
+ // [Optional] The runtime log config for job execution.
+ LoggingConfig logging_config = 8;
+}
+
+// A list of queries to run on a cluster.
+message QueryList {
+ // [Required] The queries to execute. You do not need to terminate a query
+ // with a semicolon. Multiple queries can be specified in one string
+  // by separating each with a semicolon. Here is an example of a Cloud
+ // Dataproc API snippet that uses a QueryList to specify a HiveJob:
+ //
+ // "hiveJob": {
+ // "queryList": {
+ // "queries": [
+ // "query1",
+ // "query2",
+ // "query3;query4",
+ // ]
+ // }
+ // }
+ repeated string queries = 1;
+}
+
+// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
+// queries on YARN.
+message HiveJob {
+ // [Required] The sequence of Hive queries to execute, specified as either
+ // an HCFS file URI or a list of queries.
+ oneof queries {
+ // The HCFS URI of the script that contains Hive queries.
+ string query_file_uri = 1;
+
+ // A list of queries.
+ QueryList query_list = 2;
+ }
+
+ // [Optional] Whether to continue executing queries if a query fails.
+ // The default value is `false`. Setting to `true` can be useful when executing
+ // independent parallel queries.
+ bool continue_on_failure = 3;
+
+ // [Optional] Mapping of query variable names to values (equivalent to the
+ // Hive command: `SET name="value";`).
+ map<string, string> script_variables = 4;
+
+ // [Optional] A mapping of property names and values, used to configure Hive.
+ // Properties that conflict with values set by the Cloud Dataproc API may be
+ // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+ // /etc/hive/conf/hive-site.xml, and classes in user code.
+ map<string, string> properties = 5;
+
+ // [Optional] HCFS URIs of jar files to add to the CLASSPATH of the
+ // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+ // and UDFs.
+ repeated string jar_file_uris = 6;
+}
+
+// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
+// queries.
+message SparkSqlJob {
+ // [Required] The sequence of Spark SQL queries to execute, specified as
+ // either an HCFS file URI or as a list of queries.
+ oneof queries {
+ // The HCFS URI of the script that contains SQL queries.
+ string query_file_uri = 1;
+
+ // A list of queries.
+ QueryList query_list = 2;
+ }
+
+ // [Optional] Mapping of query variable names to values (equivalent to the
+  // Spark SQL command: `SET name="value";`).
+ map<string, string> script_variables = 3;
+
+ // [Optional] A mapping of property names to values, used to configure
+ // Spark SQL's SparkConf. Properties that conflict with values set by the
+ // Cloud Dataproc API may be overwritten.
+ map<string, string> properties = 4;
+
+ // [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH.
+ repeated string jar_file_uris = 56;
+
+ // [Optional] The runtime log config for job execution.
+ LoggingConfig logging_config = 6;
+}
+
+// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
+// queries on YARN.
+message PigJob {
+ // [Required] The sequence of Pig queries to execute, specified as an HCFS
+ // file URI or a list of queries.
+ oneof queries {
+ // The HCFS URI of the script that contains the Pig queries.
+ string query_file_uri = 1;
+
+ // A list of queries.
+ QueryList query_list = 2;
+ }
+
+ // [Optional] Whether to continue executing queries if a query fails.
+ // The default value is `false`. Setting to `true` can be useful when executing
+ // independent parallel queries.
+ bool continue_on_failure = 3;
+
+ // [Optional] Mapping of query variable names to values (equivalent to the Pig
+ // command: `name=[value]`).
+ map<string, string> script_variables = 4;
+
+ // [Optional] A mapping of property names to values, used to configure Pig.
+ // Properties that conflict with values set by the Cloud Dataproc API may be
+ // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+ // /etc/pig/conf/pig.properties, and classes in user code.
+ map<string, string> properties = 5;
+
+ // [Optional] HCFS URIs of jar files to add to the CLASSPATH of
+ // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+ repeated string jar_file_uris = 6;
+
+ // [Optional] The runtime log config for job execution.
+ LoggingConfig logging_config = 7;
+}
+
+// Cloud Dataproc job placement config.
+message JobPlacement {
+ // [Required] The name of the cluster where the job will be submitted.
+ string cluster_name = 1;
+
+ // [Output-only] A cluster UUID generated by the Cloud Dataproc service when
+ // the job is submitted.
+ string cluster_uuid = 2;
+}
+
+// Cloud Dataproc job status.
+message JobStatus {
+ // The job state.
+ enum State {
+ // The job state is unknown.
+ STATE_UNSPECIFIED = 0;
+
+ // The job is pending; it has been submitted, but is not yet running.
+ PENDING = 1;
+
+ // Job has been received by the service and completed initial setup;
+ // it will soon be submitted to the cluster.
+ SETUP_DONE = 8;
+
+ // The job is running on the cluster.
+ RUNNING = 2;
+
+ // A CancelJob request has been received, but is pending.
+ CANCEL_PENDING = 3;
+
+ // Transient in-flight resources have been canceled, and the request to
+ // cancel the running job has been issued to the cluster.
+ CANCEL_STARTED = 7;
+
+ // The job cancellation was successful.
+ CANCELLED = 4;
+
+ // The job has completed successfully.
+ DONE = 5;
+
+ // The job has completed, but encountered an error.
+ ERROR = 6;
+ }
+
+ // [Output-only] A state message specifying the overall job state.
+ State state = 1;
+
+ // [Output-only] Optional job state details, such as an error
+ // description if the state is <code>ERROR</code>.
+ string details = 2;
+
+ // [Output-only] The time when this state was entered.
+ google.protobuf.Timestamp state_start_time = 6;
+}
+
+// Encapsulates the full scoping used to reference a job.
+message JobReference {
+ // [Required] The ID of the Google Cloud Platform project that the job
+ // belongs to.
+ string project_id = 1;
+
+ // [Optional] The job ID, which must be unique within the project. The job ID
+ // is generated by the server upon job submission or provided by the user as a
+ // means to perform retries without creating duplicate jobs. The ID must
+ // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+ // hyphens (-). The maximum length is 512 characters.
+ string job_id = 2;
+}
+
+// A Cloud Dataproc job resource.
+message Job {
+ // [Optional] The fully qualified reference to the job, which can be used to
+ // obtain the equivalent REST path of the job resource. If this property
+ // is not specified when a job is created, the server generates a
+ // <code>job_id</code>.
+ JobReference reference = 1;
+
+  // [Required] Job placement information, including the cluster where the
+  // job will run.
+ JobPlacement placement = 2;
+
+ // [Required] The application/framework-specific portion of the job.
+ oneof type_job {
+ // Job is a Hadoop job.
+ HadoopJob hadoop_job = 3;
+
+ // Job is a Spark job.
+ SparkJob spark_job = 4;
+
+ // Job is a Pyspark job.
+ PySparkJob pyspark_job = 5;
+
+ // Job is a Hive job.
+ HiveJob hive_job = 6;
+
+ // Job is a Pig job.
+ PigJob pig_job = 7;
+
+ // Job is a SparkSql job.
+ SparkSqlJob spark_sql_job = 12;
+ }
+
+ // [Output-only] The job status. Additional application-specific
+ // status information may be contained in the <code>type_job</code>
+ // and <code>yarn_applications</code> fields.
+ JobStatus status = 8;
+
+  // [Output-only] The previous job statuses.
+ repeated JobStatus status_history = 13;
+
+ // [Output-only] A URI pointing to the location of the stdout of the job's
+ // driver program.
+ string driver_output_resource_uri = 17;
+
+ // [Output-only] If present, the location of miscellaneous control files
+ // which may be used as part of job setup and handling. If not present,
+  // control files may be placed in the same location as `driver_output_resource_uri`.
+ string driver_control_files_uri = 15;
+}
+
+// A request to submit a job.
+message SubmitJobRequest {
+ // [Required] The ID of the Google Cloud Platform project that the job
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The job resource.
+ Job job = 2;
+}
+
+// A request to get the resource representation for a job in a project.
+message GetJobRequest {
+ // [Required] The ID of the Google Cloud Platform project that the job
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The job ID.
+ string job_id = 2;
+}
+
+// A request to list jobs in a project.
+message ListJobsRequest {
+ // A matcher that specifies categories of job states.
+ enum JobStateMatcher {
+ // Match all jobs, regardless of state.
+ ALL = 0;
+
+ // Only match jobs in non-terminal states: PENDING, RUNNING, or
+ // CANCEL_PENDING.
+ ACTIVE = 1;
+
+ // Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
+ NON_ACTIVE = 2;
+ }
+
+ // [Required] The ID of the Google Cloud Platform project that the job
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 6;
+
+ // [Optional] The number of results to return in each response.
+ int32 page_size = 2;
+
+ // [Optional] The page token, returned by a previous call, to request the
+ // next page of results.
+ string page_token = 3;
+
+ // [Optional] If set, the returned jobs list includes only jobs that were
+ // submitted to the named cluster.
+ string cluster_name = 4;
+
+ // [Optional] Specifies enumerated categories of jobs to list
+ // (default = match ALL jobs).
+ JobStateMatcher job_state_matcher = 5;
+}
+
+// A list of jobs in a project.
+message ListJobsResponse {
+ // [Output-only] Jobs list.
+ repeated Job jobs = 1;
+
+ // [Optional] This token is included in the response if there are more results
+ // to fetch. To fetch additional results, provide this value as the
+ // `page_token` in a subsequent <code>ListJobsRequest</code>.
+ string next_page_token = 2;
+}
+
+// A request to cancel a job.
+message CancelJobRequest {
+ // [Required] The ID of the Google Cloud Platform project that the job
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The job ID.
+ string job_id = 2;
+}
+
+// A request to delete a job.
+message DeleteJobRequest {
+ // [Required] The ID of the Google Cloud Platform project that the job
+ // belongs to.
+ string project_id = 1;
+
+ // [Required] The Cloud Dataproc region in which to handle the request.
+ string region = 3;
+
+ // [Required] The job ID.
+ string job_id = 2;
+}
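
Taken together, the messages above mean that a job submission names a project and a region, points at a cluster through JobPlacement, and carries exactly one member of the `type_job` oneof. Below is a minimal Python sketch of the proto3 JSON form of such a request; the `jobs:submit` REST endpoint, project, cluster, and token values are assumptions for illustration and are not defined in this proto file.

# Minimal sketch: build the proto3 JSON form of a SubmitJobRequest that carries
# a PigJob, mirroring the messages above. The endpoint URL, project, cluster,
# and access token below are placeholders, not values taken from this file.
import json
import urllib.request

project_id = "my-project"        # hypothetical project
region = "global"                # hypothetical region
access_token = "ya29...."        # placeholder OAuth2 bearer token

request_body = {
    "job": {
        "placement": {"clusterName": "my-cluster"},          # JobPlacement.cluster_name
        "pigJob": {                                           # one member of the type_job oneof
            "queryFileUri": "gs://my-bucket/my-script.pig",   # PigJob.query_file_uri
            "continueOnFailure": False,                       # PigJob.continue_on_failure
        },
    }
}

url = ("https://dataproc.googleapis.com/v1/projects/%s/regions/%s/jobs:submit"
       % (project_id, region))
req = urllib.request.Request(
    url,
    data=json.dumps(request_body).encode("utf-8"),
    headers={"Authorization": "Bearer " + access_token,
             "Content-Type": "application/json"},
)
# The response body is a Job resource, including its JobStatus.
print(urllib.request.urlopen(req).read().decode("utf-8"))
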
diff --git a/third_party/googleapis/google/cloud/dataproc/v1/operations.proto b/third_party/googleapis/google/cloud/dataproc/v1/operations.proto
new file mode 100644
index 0000000000..61227ed2a8
--- /dev/null
+++ b/third_party/googleapis/google/cloud/dataproc/v1/operations.proto
@@ -0,0 +1,79 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.dataproc.v1;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc";
+option java_multiple_files = true;
+option java_outer_classname = "OperationsProto";
+option java_package = "com.google.cloud.dataproc.v1";
+
+
+// The status of the operation.
+message ClusterOperationStatus {
+ // The operation state.
+ enum State {
+ // Unused.
+ UNKNOWN = 0;
+
+ // The operation has been created.
+ PENDING = 1;
+
+ // The operation is running.
+ RUNNING = 2;
+
+ // The operation is done; either cancelled or completed.
+ DONE = 3;
+ }
+
+ // [Output-only] A message containing the operation state.
+ State state = 1;
+
+ // [Output-only] A message containing the detailed operation state.
+ string inner_state = 2;
+
+  // [Output-only] A message containing any operation metadata details.
+ string details = 3;
+
+ // [Output-only] The time this state was entered.
+ google.protobuf.Timestamp state_start_time = 4;
+}
+
+// Metadata describing the operation.
+message ClusterOperationMetadata {
+ // [Output-only] Name of the cluster for the operation.
+ string cluster_name = 7;
+
+ // [Output-only] Cluster UUID for the operation.
+ string cluster_uuid = 8;
+
+ // [Output-only] Current operation status.
+ ClusterOperationStatus status = 9;
+
+  // [Output-only] The previous operation statuses.
+ repeated ClusterOperationStatus status_history = 10;
+
+ // [Output-only] The operation type.
+ string operation_type = 11;
+
+ // [Output-only] Short description of operation.
+ string description = 12;
+}
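
ClusterOperationStatus and ClusterOperationMetadata describe a simple lifecycle: an operation moves from PENDING through RUNNING and ends in DONE, and the full trail is kept in `status_history`. A small sketch of reading that trail and testing for the terminal state; the JSON payload below is invented for illustration.

# Minimal sketch: walk the status_history of a ClusterOperationMetadata (proto3
# JSON form) and report whether the operation has reached its terminal state.
sample_metadata = {
    "clusterName": "my-cluster",                 # invented example values
    "clusterUuid": "1234-abcd",
    "operationType": "CREATE",
    "description": "Create cluster",
    "status": {"state": "DONE", "stateStartTime": "2017-05-30T12:02:00Z"},
    "statusHistory": [
        {"state": "PENDING", "stateStartTime": "2017-05-30T12:00:00Z"},
        {"state": "RUNNING", "stateStartTime": "2017-05-30T12:00:05Z"},
    ],
}

def is_terminal(status):
    # Per the State enum above, DONE is the only terminal state.
    return status.get("state") == "DONE"

for entry in sample_metadata["statusHistory"] + [sample_metadata["status"]]:
    print(entry["stateStartTime"], entry["state"])
print("finished:", is_terminal(sample_metadata["status"]))
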
diff --git a/third_party/googleapis/google/cloud/functions/README.md b/third_party/googleapis/google/cloud/functions/README.md
new file mode 100644
index 0000000000..4a9d225c7b
--- /dev/null
+++ b/third_party/googleapis/google/cloud/functions/README.md
@@ -0,0 +1,2 @@
+API for managing lightweight user-provided functions executed in response to
+events. \ No newline at end of file
diff --git a/third_party/googleapis/google/cloud/functions/functions.yaml b/third_party/googleapis/google/cloud/functions/functions.yaml
new file mode 100644
index 0000000000..01c703bcdf
--- /dev/null
+++ b/third_party/googleapis/google/cloud/functions/functions.yaml
@@ -0,0 +1,20 @@
+type: google.api.Service
+config_version: 0
+name: cloudfunctions.googleapis.com
+title: Google Cloud Functions API
+
+apis:
+- name: google.cloud.functions.v1beta2.CloudFunctionsService
+
+documentation:
+ summary:
+ 'Google Cloud Functions is a lightweight, event-based, asynchronous compute
+ solution that allows you to create small, single-purpose functions that
+ respond to cloud events without the need to manage a server or a runtime
+ environment.'
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform
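
The authentication rule above states that every method of the Cloud Functions API accepts OAuth2 credentials carrying the `https://www.googleapis.com/auth/cloud-platform` scope. A minimal sketch of obtaining such credentials, assuming the `google-auth` Python package and Application Default Credentials are available in the environment; neither is part of this configuration file.

# Minimal sketch, assuming the `google-auth` package and Application Default
# Credentials are set up; the scope string is the canonical_scopes entry above.
import google.auth
import google.auth.transport.requests

scopes = ["https://www.googleapis.com/auth/cloud-platform"]
credentials, project = google.auth.default(scopes=scopes)

# Refresh to obtain an access token usable in an "Authorization: Bearer" header.
credentials.refresh(google.auth.transport.requests.Request())
print("obtained credentials for project:", project)
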
diff --git a/third_party/googleapis/google/cloud/functions/v1beta2/functions.proto b/third_party/googleapis/google/cloud/functions/v1beta2/functions.proto
new file mode 100644
index 0000000000..0280a8dea1
--- /dev/null
+++ b/third_party/googleapis/google/cloud/functions/v1beta2/functions.proto
@@ -0,0 +1,295 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.functions.v1beta2;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/cloud/functions/v1beta2/operations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/functions/v1beta2;functions";
+option java_multiple_files = true;
+option java_outer_classname = "FunctionsProto";
+option java_package = "com.google.cloud.functions.v1beta2";
+option objc_class_prefix = "GCF";
+
+
+// A service that applications use to manipulate triggers and functions.
+service CloudFunctionsService {
+ // Returns a list of functions that belong to the requested project.
+ rpc ListFunctions(ListFunctionsRequest) returns (ListFunctionsResponse) {
+ option (google.api.http) = { get: "/v1beta2/{location=projects/*/locations/*}/functions" };
+ }
+
+ // Returns a function with the given name from the requested project.
+ rpc GetFunction(GetFunctionRequest) returns (CloudFunction) {
+ option (google.api.http) = { get: "/v1beta2/{name=projects/*/locations/*/functions/*}" };
+ }
+
+ // Creates a new function. If a function with the given name already exists in
+ // the specified project, the long running operation will return
+ // `ALREADY_EXISTS` error.
+ rpc CreateFunction(CreateFunctionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1beta2/{location=projects/*/locations/*}/functions" body: "function" };
+ }
+
+  // Updates an existing function.
+ rpc UpdateFunction(UpdateFunctionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { put: "/v1beta2/{name=projects/*/locations/*/functions/*}" body: "function" };
+ }
+
+ // Deletes a function with the given name from the specified project. If the
+ // given function is used by some trigger, the trigger will be updated to
+ // remove this function.
+ rpc DeleteFunction(DeleteFunctionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1beta2/{name=projects/*/locations/*/functions/*}" };
+ }
+
+  // Synchronously invokes a deployed function. To be used for testing; only
+  // very limited traffic is allowed.
+ rpc CallFunction(CallFunctionRequest) returns (CallFunctionResponse) {
+ option (google.api.http) = { post: "/v1beta2/{name=projects/*/locations/*/functions/*}:call" body: "*" };
+ }
+}
+
+// Describes a Cloud Function that contains user computation executed in
+// response to an event. It encapsulates the function and trigger configurations.
+message CloudFunction {
+  // A user-defined name of the function. Function names must be unique
+  // globally and match the pattern `projects/*/locations/*/functions/*`.
+ string name = 1;
+
+ // The location of the function source code.
+ oneof source_code {
+ // The URL, starting with gs://, pointing to the zip archive which contains
+ // the function.
+ string source_archive_url = 14;
+
+ // The hosted repository where the function is defined.
+ SourceRepository source_repository = 3;
+ }
+
+ // An event that triggers the function.
+ oneof trigger {
+ // An HTTPS endpoint type of source that can be triggered via URL.
+ HTTPSTrigger https_trigger = 6;
+
+ // A source that fires events in response to a condition in another service.
+ EventTrigger event_trigger = 12;
+ }
+
+ // Output only. Status of the function deployment.
+ CloudFunctionStatus status = 7;
+
+ // Output only. Name of the most recent operation modifying the function. If
+ // the function status is `DEPLOYING` or `DELETING`, then it points to the
+ // active operation.
+ string latest_operation = 8;
+
+  // The name of the function (as defined in source code) that will be
+  // executed. Defaults to the resource name suffix, if not specified. For
+  // backward compatibility, if a function with the given name is not found,
+  // the system will try to use the function named "function".
+  // For Node.js, this is the name of a function exported by the module
+  // specified in `source_location`.
+ string entry_point = 9;
+
+  // The function execution timeout. Execution is considered failed and
+  // can be terminated if the function has not completed by the end of the
+  // timeout period. Defaults to 60 seconds.
+ google.protobuf.Duration timeout = 10;
+
+ // The amount of memory in MB available for a function.
+ // Defaults to 256MB.
+ int32 available_memory_mb = 11;
+
+ // Output only. The service account of the function.
+ string service_account = 13;
+
+ // Output only. The last update timestamp of a Cloud Function.
+ google.protobuf.Timestamp update_time = 15;
+}
+
+// Describes an HTTPSTrigger, which can be used to connect webhooks to a function.
+message HTTPSTrigger {
+  // Output only. The deployed URL for the function.
+ string url = 1;
+}
+
+// Describes an EventTrigger, used to request that events be sent from
+// another service.
+message EventTrigger {
+  // `event_type` names contain the service that is sending an event and the
+  // kind of event that was fired. They must be of the form
+  // `providers/*/eventTypes/*`. For example, to directly handle a message
+  // published to Google Cloud Pub/Sub, use
+  // `providers/cloud.pubsub/eventTypes/topic.publish`.
+  //
+  // To handle an object changing in Google Cloud Storage, use
+  // `providers/cloud.storage/eventTypes/object.change`.
+  //
+  // To handle a write to the Firebase Realtime Database, use
+  // `providers/firebase.database/eventTypes/data.write`.
+ string event_type = 1;
+
+  // Which instance of the source's service should send events. For example,
+  // for Pub/Sub this would be a Pub/Sub topic at `projects/*/topics/*`. For
+  // Google Cloud Storage this would be a bucket at `projects/*/buckets/*`. For
+  // any source that only supports one instance per project, this should be the
+  // name of the project (`projects/*`).
+ string resource = 2;
+}
+
+// Describes the location of the function source in a remote repository.
+message SourceRepository {
+  // URL to the hosted repository where the function is defined. Only paths in
+  // the https://source.developers.google.com domain are supported. The path
+  // should contain the name of the repository.
+ string repository_url = 1;
+
+ // The path within the repository where the function is defined. The path
+ // should point to the directory where Cloud Functions files are located. Use
+ // "/" if the function is defined directly in the root directory of a
+ // repository.
+ string source_path = 2;
+
+ // The version of a function. Defaults to the latest version of the master
+ // branch.
+ oneof version {
+ // The name of the branch from which the function should be fetched.
+ string branch = 3;
+
+ // The name of the tag that captures the state of the repository from
+ // which the function should be fetched.
+ string tag = 4;
+
+ // The id of the revision that captures the state of the repository from
+ // which the function should be fetched.
+ string revision = 5;
+ }
+
+  // Output only. The ID of the revision that was resolved at the moment of
+  // function creation or update. For example, when a user deploys from a
+  // branch, it will be the revision ID of the latest change on that branch at
+  // that time. If the user deploys from a specific revision, this value will
+  // always be equal to the revision specified by the user.
+ string deployed_revision = 6;
+}
+
+// Request for the `CreateFunction` method.
+message CreateFunctionRequest {
+ // The project and location in which the function should be created, specified
+  // in the format `projects/*/locations/*`.
+ string location = 1;
+
+ // Function to be created.
+ CloudFunction function = 2;
+}
+
+// Request for the `UpdateFunction` method.
+message UpdateFunctionRequest {
+ // The name of the function to be updated.
+ string name = 1;
+
+ // New version of the function.
+ CloudFunction function = 2;
+}
+
+// Request for the `GetFunction` method.
+message GetFunctionRequest {
+  // The name of the function whose details should be obtained.
+ string name = 1;
+}
+
+// Request for the `ListFunctions` method.
+message ListFunctionsRequest {
+  // The project and location from which functions should be listed,
+  // specified in the format `projects/*/locations/*`.
+ // If you want to list functions in all locations, use "-" in place of a
+ // location.
+ string location = 1;
+
+ // Maximum number of functions to return per call.
+ int32 page_size = 2;
+
+ // The value returned by the last
+ // `ListFunctionsResponse`; indicates that
+ // this is a continuation of a prior `ListFunctions` call, and that the
+ // system should return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the `ListFunctions` method.
+message ListFunctionsResponse {
+ // The functions that match the request.
+ repeated CloudFunction functions = 1;
+
+ // If not empty, indicates that there may be more functions that match
+ // the request; this value should be passed in a new
+ // [google.cloud.functions.v1beta2.ListFunctionsRequest][]
+ // to get more functions.
+ string next_page_token = 2;
+}
+
+// Request for the `DeleteFunction` method.
+message DeleteFunctionRequest {
+ // The name of the function which should be deleted.
+ string name = 1;
+}
+
+// Request for the `CallFunction` method.
+message CallFunctionRequest {
+ // The name of the function to be called.
+ string name = 1;
+
+ // Input to be passed to the function.
+ string data = 2;
+}
+
+// Response for the `CallFunction` method.
+message CallFunctionResponse {
+ // Execution id of function invocation.
+ string execution_id = 1;
+
+  // Result populated for the successful execution of a synchronous function.
+  // Will not be populated if the function does not return a result through
+  // the context.
+ string result = 2;
+
+  // Either a system or user-function generated error. Set if the execution
+  // was not successful.
+ string error = 3;
+}
+
+// Describes the current stage of a deployment.
+enum CloudFunctionStatus {
+ // Status not specified.
+ STATUS_UNSPECIFIED = 0;
+
+ // Successfully deployed.
+ READY = 1;
+
+ // Not deployed correctly - behavior is undefined. The item should be updated
+ // or deleted to move it out of this state.
+ FAILED = 2;
+
+ // Creation or update in progress.
+ DEPLOYING = 3;
+
+ // Deletion in progress.
+ DELETING = 4;
+}
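
The `google.api.http` options above define the service's REST surface; for example, `CallFunction` maps to a POST on the function's resource name suffixed with `:call`, with the remaining request fields sent as the JSON body. A rough sketch of that call against the `cloudfunctions.googleapis.com` host declared in `functions.yaml` earlier in this change; the function name and access token are placeholders.

# Minimal sketch of CallFunction over the REST mapping shown above. The
# function name and the access token are placeholders, not real resources.
import json
import urllib.request

name = "projects/my-project/locations/us-central1/functions/helloWorld"
access_token = "ya29...."  # placeholder token with the cloud-platform scope

req = urllib.request.Request(
    "https://cloudfunctions.googleapis.com/v1beta2/%s:call" % name,
    data=json.dumps({"data": '{"message": "hi"}'}).encode("utf-8"),
    headers={"Authorization": "Bearer " + access_token,
             "Content-Type": "application/json"},
)
response = json.loads(urllib.request.urlopen(req).read().decode("utf-8"))
# CallFunctionResponse: executionId plus either result or error.
print(response.get("executionId"), response.get("result"), response.get("error"))
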
diff --git a/third_party/googleapis/google/cloud/functions/v1beta2/functions_gapic.yaml b/third_party/googleapis/google/cloud/functions/v1beta2/functions_gapic.yaml
new file mode 100644
index 0000000000..7ef35cf510
--- /dev/null
+++ b/third_party/googleapis/google/cloud/functions/v1beta2/functions_gapic.yaml
@@ -0,0 +1,145 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.functions.spi.v1beta2
+ python:
+ package_name: google.cloud.functions.v1beta2
+ go:
+ package_name: cloud.google.com/go/cloud/functions/apiv1beta2
+ csharp:
+ package_name: Google.Cloud.Functions.V1beta2
+ ruby:
+ package_name: Google::Cloud::Functions::V1beta2
+ php:
+ package_name: Google\Cloud\Functions\V1beta2
+ nodejs:
+ package_name: functions.v1beta2
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.cloud.functions.v1beta2.CloudFunctionsService
+ collections:
+ - name_pattern: projects/{project}/locations/{location}
+ entity_name: location
+ - name_pattern: projects/{project}/locations/{location}/functions/{function}
+ entity_name: function
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: ListFunctions
+ flattening:
+ groups:
+ - parameters:
+ - location
+ required_fields:
+ - location
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: functions
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ location: location
+ timeout_millis: 60000
+ - name: GetFunction
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: function
+ timeout_millis: 60000
+ - name: CreateFunction
+ flattening:
+ groups:
+ - parameters:
+ - location
+ - function
+ required_fields:
+ - location
+ - function
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ location: location
+ timeout_millis: 60000
+ long_running:
+ return_type: google.cloud.functions.v1beta2.CloudFunction
+ metadata_type: google.cloud.functions.v1beta2.OperationMetadataV1Beta2
+ - name: UpdateFunction
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - function
+ required_fields:
+ - name
+ - function
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: function
+ timeout_millis: 60000
+ long_running:
+ return_type: google.cloud.functions.v1beta2.CloudFunction
+ metadata_type: google.cloud.functions.v1beta2.OperationMetadataV1Beta2
+ - name: DeleteFunction
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: function
+ timeout_millis: 60000
+ long_running:
+ return_type: google.protobuf.Empty
+ metadata_type: google.cloud.functions.v1beta2.OperationMetadataV1Beta2
+ - name: CallFunction
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - data
+ required_fields:
+ - name
+ - data
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: function
+ timeout_millis: 60000
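
The `retry_params_def` block is what generated clients expand into an exponential backoff: the delay starts at `initial_retry_delay_millis`, grows by `retry_delay_multiplier` after each attempt, is capped at `max_retry_delay_millis`, and the whole sequence is bounded by `total_timeout_millis`. The sketch below prints the schedule implied by the `default` parameters; jitter and per-RPC timeout growth are deliberately ignored, so this is only an approximation of the GAPIC semantics.

# Simplified sketch: expand the `default` retry_params_def above into the retry
# delay sequence a generated client would roughly follow (no jitter modeled).
initial_retry_delay_millis = 100
retry_delay_multiplier = 1.3
max_retry_delay_millis = 60000
total_timeout_millis = 600000

delay, elapsed, attempt = initial_retry_delay_millis, 0, 0
while elapsed + delay <= total_timeout_millis:
    attempt += 1
    elapsed += delay
    print("attempt %2d: wait %7.0f ms (elapsed %6.0f ms)" % (attempt, delay, elapsed))
    delay = min(delay * retry_delay_multiplier, max_retry_delay_millis)
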
diff --git a/third_party/googleapis/google/cloud/functions/v1beta2/operations.proto b/third_party/googleapis/google/cloud/functions/v1beta2/operations.proto
new file mode 100644
index 0000000000..2a6f267e84
--- /dev/null
+++ b/third_party/googleapis/google/cloud/functions/v1beta2/operations.proto
@@ -0,0 +1,54 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.functions.v1beta2;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/functions/v1beta2;functions";
+option java_multiple_files = true;
+option java_outer_classname = "FunctionsOperationsProto";
+option java_package = "com.google.cloud.functions.v1beta2";
+
+
+// Metadata describing an [Operation][google.longrunning.Operation].
+message OperationMetadataV1Beta2 {
+ // Target of the operation - for example
+ // projects/project-1/locations/region-1/functions/function-1
+ string target = 1;
+
+ // Type of operation.
+ OperationType type = 2;
+
+ // The original request that started the operation.
+ google.protobuf.Any request = 3;
+}
+
+// A type of an operation.
+enum OperationType {
+ // Unknown operation type.
+ OPERATION_UNSPECIFIED = 0;
+
+  // Triggered by CreateFunction call.
+  CREATE_FUNCTION = 1;
+
+  // Triggered by UpdateFunction call.
+  UPDATE_FUNCTION = 2;
+
+ // Triggered by DeleteFunction call.
+ DELETE_FUNCTION = 3;
+}
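
`OperationMetadataV1Beta2` is returned as the `metadata` of a `google.longrunning.Operation`, and its `request` field is a `google.protobuf.Any`, which in the proto3 JSON mapping carries an `@type` discriminator alongside the packed message's own fields. A small sketch of inspecting that metadata; the payload below is invented for illustration.

# Minimal sketch: inspect the JSON form of a long-running operation whose
# metadata is the OperationMetadataV1Beta2 message above (invented payload).
operation = {
    "name": "operations/operation-123",
    "done": False,
    "metadata": {
        "@type": "type.googleapis.com/google.cloud.functions.v1beta2.OperationMetadataV1Beta2",
        "target": "projects/my-project/locations/us-central1/functions/helloWorld",
        "type": "CREATE_FUNCTION",
        "request": {
            "@type": "type.googleapis.com/google.cloud.functions.v1beta2.CreateFunctionRequest",
            "location": "projects/my-project/locations/us-central1",
        },
    },
}

meta = operation["metadata"]
print("operation", meta["type"], "on", meta["target"])
print("original request type:", meta["request"]["@type"])
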
diff --git a/third_party/googleapis/google/cloud/language/README.md b/third_party/googleapis/google/cloud/language/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/README.md
diff --git a/third_party/googleapis/google/cloud/language/language_v1.yaml b/third_party/googleapis/google/cloud/language/language_v1.yaml
new file mode 100644
index 0000000000..7af5eaee69
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/language_v1.yaml
@@ -0,0 +1,19 @@
+type: google.api.Service
+config_version: 2
+name: language.googleapis.com
+title: Google Cloud Natural Language API
+
+apis:
+- name: google.cloud.language.v1.LanguageService
+
+documentation:
+ summary:
+ 'Google Cloud Natural Language API provides natural language understanding
+ technologies to developers. Examples include sentiment analysis, entity
+ recognition, and text annotations.'
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/cloud/language/language_v1beta2.yaml b/third_party/googleapis/google/cloud/language/language_v1beta2.yaml
new file mode 100644
index 0000000000..aae1f6f55e
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/language_v1beta2.yaml
@@ -0,0 +1,19 @@
+type: google.api.Service
+config_version: 2
+name: language.googleapis.com
+title: Google Cloud Natural Language API
+
+apis:
+- name: google.cloud.language.v1beta2.LanguageService
+
+documentation:
+ summary:
+ 'Google Cloud Natural Language API provides natural language understanding
+ technologies to developers. Examples include sentiment analysis, entity
+ recognition, and text annotations.'
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/cloud/language/v1/language_gapic.yaml b/third_party/googleapis/google/cloud/language/v1/language_gapic.yaml
new file mode 100644
index 0000000000..024690ad6c
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/v1/language_gapic.yaml
@@ -0,0 +1,93 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.language.spi.v1
+ python:
+ package_name: google.cloud.gapic.language.v1
+ go:
+ package_name: cloud.google.com/go/language/apiv1
+ csharp:
+ package_name: Google.Cloud.Language.V1
+ ruby:
+ package_name: Google::Cloud::Language::V1
+ php:
+ package_name: Google\Cloud\Language\V1
+ nodejs:
+ package_name: language.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.cloud.language.v1.LanguageService
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: AnalyzeSentiment
+ flattening:
+ groups:
+ - parameters:
+ - document
+ required_fields:
+ - document
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnalyzeEntities
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - encoding_type
+ required_fields:
+ - document
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnalyzeSyntax
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - encoding_type
+ required_fields:
+ - document
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnnotateText
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - features
+ - encoding_type
+ required_fields:
+ - document
+ - features
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
diff --git a/third_party/googleapis/google/cloud/language/v1/language_service.proto b/third_party/googleapis/google/cloud/language/v1/language_service.proto
new file mode 100644
index 0000000000..e8758e5ba4
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/v1/language_service.proto
@@ -0,0 +1,948 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.language.v1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/language/v1;language";
+option java_multiple_files = true;
+option java_outer_classname = "LanguageServiceProto";
+option java_package = "com.google.cloud.language.v1";
+
+
+// Provides text analysis operations such as sentiment analysis and entity
+// recognition.
+service LanguageService {
+ // Analyzes the sentiment of the provided text.
+ rpc AnalyzeSentiment(AnalyzeSentimentRequest) returns (AnalyzeSentimentResponse) {
+ option (google.api.http) = { post: "/v1/documents:analyzeSentiment" body: "*" };
+ }
+
+ // Finds named entities (currently proper names and common nouns) in the text
+ // along with entity types, salience, mentions for each entity, and
+ // other properties.
+ rpc AnalyzeEntities(AnalyzeEntitiesRequest) returns (AnalyzeEntitiesResponse) {
+ option (google.api.http) = { post: "/v1/documents:analyzeEntities" body: "*" };
+ }
+
+ // Analyzes the syntax of the text and provides sentence boundaries and
+ // tokenization along with part of speech tags, dependency trees, and other
+ // properties.
+ rpc AnalyzeSyntax(AnalyzeSyntaxRequest) returns (AnalyzeSyntaxResponse) {
+ option (google.api.http) = { post: "/v1/documents:analyzeSyntax" body: "*" };
+ }
+
+ // A convenience method that provides all the features that analyzeSentiment,
+ // analyzeEntities, and analyzeSyntax provide in one call.
+ rpc AnnotateText(AnnotateTextRequest) returns (AnnotateTextResponse) {
+ option (google.api.http) = { post: "/v1/documents:annotateText" body: "*" };
+ }
+}
+
+// ################################################################ #
+//
+// Represents the input to API methods.
+message Document {
+ // The document types enum.
+ enum Type {
+ // The content type is not specified.
+ TYPE_UNSPECIFIED = 0;
+
+ // Plain text
+ PLAIN_TEXT = 1;
+
+ // HTML
+ HTML = 2;
+ }
+
+ // Required. If the type is not set or is `TYPE_UNSPECIFIED`,
+ // returns an `INVALID_ARGUMENT` error.
+ Type type = 1;
+
+ // The source of the document: a string containing the content or a
+ // Google Cloud Storage URI.
+ oneof source {
+ // The content of the input in string format.
+ string content = 2;
+
+ // The Google Cloud Storage URI where the file content is located.
+ // This URI must be of the form: gs://bucket_name/object_name. For more
+ // details, see https://cloud.google.com/storage/docs/reference-uris.
+ // NOTE: Cloud Storage object versioning is not supported.
+ string gcs_content_uri = 3;
+ }
+
+ // The language of the document (if not specified, the language is
+ // automatically detected). Both ISO and BCP-47 language codes are
+ // accepted.<br>
+ // [Language Support](https://cloud.google.com/natural-language/docs/languages)
+ // lists currently supported languages for each API method.
+ // If the language (either specified by the caller or automatically detected)
+ // is not supported by the called API method, an `INVALID_ARGUMENT` error
+ // is returned.
+ string language = 4;
+}
+
+// Represents a sentence in the input document.
+message Sentence {
+ // The sentence text.
+ TextSpan text = 1;
+
+ // For calls to [AnalyzeSentiment][] or if
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment] is set to
+ // true, this field will contain the sentiment for the sentence.
+ Sentiment sentiment = 2;
+}
+
+// Represents a phrase in the text that is a known entity, such as
+// a person, an organization, or location. The API associates information, such
+// as salience and mentions, with entities.
+message Entity {
+ // The type of the entity.
+ enum Type {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Person
+ PERSON = 1;
+
+ // Location
+ LOCATION = 2;
+
+ // Organization
+ ORGANIZATION = 3;
+
+ // Event
+ EVENT = 4;
+
+ // Work of art
+ WORK_OF_ART = 5;
+
+ // Consumer goods
+ CONSUMER_GOOD = 6;
+
+ // Other types
+ OTHER = 7;
+ }
+
+ // The representative name for the entity.
+ string name = 1;
+
+ // The entity type.
+ Type type = 2;
+
+ // Metadata associated with the entity.
+ //
+ // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
+ // available. The associated keys are "wikipedia_url" and "mid", respectively.
+ map<string, string> metadata = 3;
+
+ // The salience score associated with the entity in the [0, 1.0] range.
+ //
+ // The salience score for an entity provides information about the
+ // importance or centrality of that entity to the entire document text.
+ // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
+ // salient.
+ float salience = 4;
+
+ // The mentions of this entity in the input document. The API currently
+ // supports proper noun mentions.
+ repeated EntityMention mentions = 5;
+}
+
+// Represents the smallest syntactic building block of the text.
+message Token {
+ // The token text.
+ TextSpan text = 1;
+
+  // Part of speech tag for this token.
+ PartOfSpeech part_of_speech = 2;
+
+ // Dependency tree parse for this token.
+ DependencyEdge dependency_edge = 3;
+
+ // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
+ string lemma = 4;
+}
+
+// Represents the feeling associated with the entire text or entities in
+// the text.
+message Sentiment {
+ // A non-negative number in the [0, +inf) range, which represents
+ // the absolute magnitude of sentiment regardless of score (positive or
+ // negative).
+ float magnitude = 2;
+
+ // Sentiment score between -1.0 (negative sentiment) and 1.0
+ // (positive sentiment).
+ float score = 3;
+}
+
+// Represents part of speech information for a token. Parts of speech
+// are as defined in
+// http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
+message PartOfSpeech {
+ // The part of speech tags enum.
+ enum Tag {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Adjective
+ ADJ = 1;
+
+ // Adposition (preposition and postposition)
+ ADP = 2;
+
+ // Adverb
+ ADV = 3;
+
+ // Conjunction
+ CONJ = 4;
+
+ // Determiner
+ DET = 5;
+
+ // Noun (common and proper)
+ NOUN = 6;
+
+ // Cardinal number
+ NUM = 7;
+
+ // Pronoun
+ PRON = 8;
+
+ // Particle or other function word
+ PRT = 9;
+
+ // Punctuation
+ PUNCT = 10;
+
+ // Verb (all tenses and modes)
+ VERB = 11;
+
+ // Other: foreign words, typos, abbreviations
+ X = 12;
+
+ // Affix
+ AFFIX = 13;
+ }
+
+ // The characteristic of a verb that expresses time flow during an event.
+ enum Aspect {
+ // Aspect is not applicable in the analyzed language or is not predicted.
+ ASPECT_UNKNOWN = 0;
+
+ // Perfective
+ PERFECTIVE = 1;
+
+ // Imperfective
+ IMPERFECTIVE = 2;
+
+ // Progressive
+ PROGRESSIVE = 3;
+ }
+
+ // The grammatical function performed by a noun or pronoun in a phrase,
+ // clause, or sentence. In some languages, other parts of speech, such as
+ // adjective and determiner, take case inflection in agreement with the noun.
+ enum Case {
+ // Case is not applicable in the analyzed language or is not predicted.
+ CASE_UNKNOWN = 0;
+
+ // Accusative
+ ACCUSATIVE = 1;
+
+ // Adverbial
+ ADVERBIAL = 2;
+
+ // Complementive
+ COMPLEMENTIVE = 3;
+
+ // Dative
+ DATIVE = 4;
+
+ // Genitive
+ GENITIVE = 5;
+
+ // Instrumental
+ INSTRUMENTAL = 6;
+
+ // Locative
+ LOCATIVE = 7;
+
+ // Nominative
+ NOMINATIVE = 8;
+
+ // Oblique
+ OBLIQUE = 9;
+
+ // Partitive
+ PARTITIVE = 10;
+
+ // Prepositional
+ PREPOSITIONAL = 11;
+
+ // Reflexive
+ REFLEXIVE_CASE = 12;
+
+ // Relative
+ RELATIVE_CASE = 13;
+
+ // Vocative
+ VOCATIVE = 14;
+ }
+
+  // Depending on the language, Form can categorize different forms of
+  // verbs, adjectives, adverbs, etc. For example, it can categorize inflected
+  // endings of verbs and adjectives or distinguish between short and long
+  // forms of adjectives and participles.
+ enum Form {
+ // Form is not applicable in the analyzed language or is not predicted.
+ FORM_UNKNOWN = 0;
+
+ // Adnomial
+ ADNOMIAL = 1;
+
+ // Auxiliary
+ AUXILIARY = 2;
+
+ // Complementizer
+ COMPLEMENTIZER = 3;
+
+ // Final ending
+ FINAL_ENDING = 4;
+
+ // Gerund
+ GERUND = 5;
+
+ // Realis
+ REALIS = 6;
+
+ // Irrealis
+ IRREALIS = 7;
+
+ // Short form
+ SHORT = 8;
+
+ // Long form
+ LONG = 9;
+
+ // Order form
+ ORDER = 10;
+
+ // Specific form
+ SPECIFIC = 11;
+ }
+
+ // Gender classes of nouns reflected in the behaviour of associated words.
+ enum Gender {
+ // Gender is not applicable in the analyzed language or is not predicted.
+ GENDER_UNKNOWN = 0;
+
+ // Feminine
+ FEMININE = 1;
+
+ // Masculine
+ MASCULINE = 2;
+
+ // Neuter
+ NEUTER = 3;
+ }
+
+ // The grammatical feature of verbs, used for showing modality and attitude.
+ enum Mood {
+ // Mood is not applicable in the analyzed language or is not predicted.
+ MOOD_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_MOOD = 1;
+
+ // Imperative
+ IMPERATIVE = 2;
+
+ // Indicative
+ INDICATIVE = 3;
+
+ // Interrogative
+ INTERROGATIVE = 4;
+
+ // Jussive
+ JUSSIVE = 5;
+
+ // Subjunctive
+ SUBJUNCTIVE = 6;
+ }
+
+ // Count distinctions.
+ enum Number {
+ // Number is not applicable in the analyzed language or is not predicted.
+ NUMBER_UNKNOWN = 0;
+
+ // Singular
+ SINGULAR = 1;
+
+ // Plural
+ PLURAL = 2;
+
+ // Dual
+ DUAL = 3;
+ }
+
+ // The distinction between the speaker, second person, third person, etc.
+ enum Person {
+ // Person is not applicable in the analyzed language or is not predicted.
+ PERSON_UNKNOWN = 0;
+
+ // First
+ FIRST = 1;
+
+ // Second
+ SECOND = 2;
+
+ // Third
+ THIRD = 3;
+
+ // Reflexive
+ REFLEXIVE_PERSON = 4;
+ }
+
+ // This category shows if the token is part of a proper name.
+ enum Proper {
+ // Proper is not applicable in the analyzed language or is not predicted.
+ PROPER_UNKNOWN = 0;
+
+ // Proper
+ PROPER = 1;
+
+ // Not proper
+ NOT_PROPER = 2;
+ }
+
+ // Reciprocal features of a pronoun.
+ enum Reciprocity {
+ // Reciprocity is not applicable in the analyzed language or is not
+ // predicted.
+ RECIPROCITY_UNKNOWN = 0;
+
+ // Reciprocal
+ RECIPROCAL = 1;
+
+ // Non-reciprocal
+ NON_RECIPROCAL = 2;
+ }
+
+ // Time reference.
+ enum Tense {
+ // Tense is not applicable in the analyzed language or is not predicted.
+ TENSE_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_TENSE = 1;
+
+ // Future
+ FUTURE = 2;
+
+ // Past
+ PAST = 3;
+
+ // Present
+ PRESENT = 4;
+
+ // Imperfect
+ IMPERFECT = 5;
+
+ // Pluperfect
+ PLUPERFECT = 6;
+ }
+
+ // The relationship between the action that a verb expresses and the
+ // participants identified by its arguments.
+ enum Voice {
+ // Voice is not applicable in the analyzed language or is not predicted.
+ VOICE_UNKNOWN = 0;
+
+ // Active
+ ACTIVE = 1;
+
+ // Causative
+ CAUSATIVE = 2;
+
+ // Passive
+ PASSIVE = 3;
+ }
+
+ // The part of speech tag.
+ Tag tag = 1;
+
+ // The grammatical aspect.
+ Aspect aspect = 2;
+
+ // The grammatical case.
+ Case case = 3;
+
+ // The grammatical form.
+ Form form = 4;
+
+ // The grammatical gender.
+ Gender gender = 5;
+
+ // The grammatical mood.
+ Mood mood = 6;
+
+ // The grammatical number.
+ Number number = 7;
+
+ // The grammatical person.
+ Person person = 8;
+
+ // The grammatical properness.
+ Proper proper = 9;
+
+ // The grammatical reciprocity.
+ Reciprocity reciprocity = 10;
+
+ // The grammatical tense.
+ Tense tense = 11;
+
+ // The grammatical voice.
+ Voice voice = 12;
+}
+
+// Represents dependency parse tree information for a token. (For more
+// information on dependency labels, see
+// http://www.aclweb.org/anthology/P13-2017.)
+message DependencyEdge {
+ // The parse label enum for the token.
+ enum Label {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Abbreviation modifier
+ ABBREV = 1;
+
+ // Adjectival complement
+ ACOMP = 2;
+
+ // Adverbial clause modifier
+ ADVCL = 3;
+
+ // Adverbial modifier
+ ADVMOD = 4;
+
+ // Adjectival modifier of an NP
+ AMOD = 5;
+
+ // Appositional modifier of an NP
+ APPOS = 6;
+
+ // Attribute dependent of a copular verb
+ ATTR = 7;
+
+ // Auxiliary (non-main) verb
+ AUX = 8;
+
+ // Passive auxiliary
+ AUXPASS = 9;
+
+ // Coordinating conjunction
+ CC = 10;
+
+ // Clausal complement of a verb or adjective
+ CCOMP = 11;
+
+ // Conjunct
+ CONJ = 12;
+
+ // Clausal subject
+ CSUBJ = 13;
+
+ // Clausal passive subject
+ CSUBJPASS = 14;
+
+ // Dependency (unable to determine)
+ DEP = 15;
+
+ // Determiner
+ DET = 16;
+
+ // Discourse
+ DISCOURSE = 17;
+
+ // Direct object
+ DOBJ = 18;
+
+ // Expletive
+ EXPL = 19;
+
+ // Goes with (part of a word in a text not well edited)
+ GOESWITH = 20;
+
+ // Indirect object
+ IOBJ = 21;
+
+ // Marker (word introducing a subordinate clause)
+ MARK = 22;
+
+ // Multi-word expression
+ MWE = 23;
+
+ // Multi-word verbal expression
+ MWV = 24;
+
+ // Negation modifier
+ NEG = 25;
+
+ // Noun compound modifier
+ NN = 26;
+
+ // Noun phrase used as an adverbial modifier
+ NPADVMOD = 27;
+
+ // Nominal subject
+ NSUBJ = 28;
+
+ // Passive nominal subject
+ NSUBJPASS = 29;
+
+ // Numeric modifier of a noun
+ NUM = 30;
+
+ // Element of compound number
+ NUMBER = 31;
+
+ // Punctuation mark
+ P = 32;
+
+ // Parataxis relation
+ PARATAXIS = 33;
+
+ // Participial modifier
+ PARTMOD = 34;
+
+ // The complement of a preposition is a clause
+ PCOMP = 35;
+
+ // Object of a preposition
+ POBJ = 36;
+
+ // Possession modifier
+ POSS = 37;
+
+ // Postverbal negative particle
+ POSTNEG = 38;
+
+ // Predicate complement
+ PRECOMP = 39;
+
+    // Preconjunct
+ PRECONJ = 40;
+
+ // Predeterminer
+ PREDET = 41;
+
+ // Prefix
+ PREF = 42;
+
+ // Prepositional modifier
+ PREP = 43;
+
+ // The relationship between a verb and verbal morpheme
+ PRONL = 44;
+
+ // Particle
+ PRT = 45;
+
+ // Associative or possessive marker
+ PS = 46;
+
+ // Quantifier phrase modifier
+ QUANTMOD = 47;
+
+ // Relative clause modifier
+ RCMOD = 48;
+
+ // Complementizer in relative clause
+ RCMODREL = 49;
+
+ // Ellipsis without a preceding predicate
+ RDROP = 50;
+
+ // Referent
+ REF = 51;
+
+ // Remnant
+ REMNANT = 52;
+
+ // Reparandum
+ REPARANDUM = 53;
+
+ // Root
+ ROOT = 54;
+
+ // Suffix specifying a unit of number
+ SNUM = 55;
+
+ // Suffix
+ SUFF = 56;
+
+ // Temporal modifier
+ TMOD = 57;
+
+ // Topic marker
+ TOPIC = 58;
+
+ // Clause headed by an infinite form of the verb that modifies a noun
+ VMOD = 59;
+
+ // Vocative
+ VOCATIVE = 60;
+
+ // Open clausal complement
+ XCOMP = 61;
+
+ // Name suffix
+ SUFFIX = 62;
+
+ // Name title
+ TITLE = 63;
+
+ // Adverbial phrase modifier
+ ADVPHMOD = 64;
+
+ // Causative auxiliary
+ AUXCAUS = 65;
+
+ // Helper auxiliary
+ AUXVV = 66;
+
+ // Rentaishi (Prenominal modifier)
+ DTMOD = 67;
+
+ // Foreign words
+ FOREIGN = 68;
+
+ // Keyword
+ KW = 69;
+
+ // List for chains of comparable items
+ LIST = 70;
+
+ // Nominalized clause
+ NOMC = 71;
+
+ // Nominalized clausal subject
+ NOMCSUBJ = 72;
+
+ // Nominalized clausal passive
+ NOMCSUBJPASS = 73;
+
+ // Compound of numeric modifier
+ NUMC = 74;
+
+ // Copula
+ COP = 75;
+
+ // Dislocated relation (for fronted/topicalized elements)
+ DISLOCATED = 76;
+ }
+
+ // Represents the head of this token in the dependency tree.
+ // This is the index of the token which has an arc going to this token.
+ // The index is the position of the token in the array of tokens returned
+ // by the API method. If this token is a root token, then the
+ // `head_token_index` is its own index.
+ int32 head_token_index = 1;
+
+ // The parse label for the token.
+ Label label = 2;
+}
+
+// Represents a mention for an entity in the text. Currently, proper noun
+// mentions are supported.
+message EntityMention {
+ // The supported types of mentions.
+ enum Type {
+ // Unknown
+ TYPE_UNKNOWN = 0;
+
+ // Proper name
+ PROPER = 1;
+
+ // Common noun (or noun compound)
+ COMMON = 2;
+ }
+
+ // The mention text.
+ TextSpan text = 1;
+
+ // The type of the entity mention.
+ Type type = 2;
+}
+
+// Represents an output piece of text.
+message TextSpan {
+ // The content of the output text.
+ string content = 1;
+
+ // The API calculates the beginning offset of the content in the original
+ // document according to the [EncodingType][google.cloud.language.v1.EncodingType] specified in the API request.
+ int32 begin_offset = 2;
+}
+
+// The sentiment analysis request message.
+message AnalyzeSentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate sentence offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The sentiment analysis response message.
+message AnalyzeSentimentResponse {
+ // The overall sentiment of the input document.
+ Sentiment document_sentiment = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 2;
+
+ // The sentiment for all the sentences in the document.
+ repeated Sentence sentences = 3;
+}
+
+// The entity analysis request message.
+message AnalyzeEntitiesRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity analysis response message.
+message AnalyzeEntitiesResponse {
+ // The recognized entities in the input document.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 2;
+}
+
+// The syntax analysis request message.
+message AnalyzeSyntaxRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The syntax analysis response message.
+message AnalyzeSyntaxResponse {
+ // Sentences in the input document.
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ repeated Token tokens = 2;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 3;
+}
+
+// The request message for the text annotation API, which can perform multiple
+// analysis types (sentiment, entities, and syntax) in one call.
+message AnnotateTextRequest {
+ // All available features for sentiment, syntax, and semantic analysis.
+ // Setting each one to true will enable that specific analysis for the input.
+ message Features {
+ // Extract syntax information.
+ bool extract_syntax = 1;
+
+ // Extract entities.
+ bool extract_entities = 2;
+
+ // Extract document-level sentiment.
+ bool extract_document_sentiment = 3;
+ }
+
+ // Input document.
+ Document document = 1;
+
+ // The enabled features.
+ Features features = 2;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 3;
+}
+
+// The text annotations response message.
+message AnnotateTextResponse {
+ // Sentences in the input document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
+ repeated Token tokens = 2;
+
+ // Entities, along with their semantic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entities].
+ repeated Entity entities = 3;
+
+ // The overall sentiment for the document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment].
+ Sentiment document_sentiment = 4;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 5;
+}
+
+// Represents the text encoding that the caller uses to process the output.
+// Providing an `EncodingType` is recommended because the API provides the
+// beginning offsets for various outputs, such as tokens and mentions, and
+// languages that natively use different text encodings may access offsets
+// differently.
+enum EncodingType {
+ // If `EncodingType` is not specified, encoding-dependent information (such as
+ // `begin_offset`) will be set at `-1`.
+ NONE = 0;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-8 encoding of the input. C++ and Go are examples of languages
+ // that use this encoding natively.
+ UTF8 = 1;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-16 encoding of the input. Java and Javascript are examples of
+ // languages that use this encoding natively.
+ UTF16 = 2;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-32 encoding of the input. Python is an example of a language
+ // that uses this encoding natively.
+ UTF32 = 3;
+}
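
The `EncodingType` enum exists because a `begin_offset` is only meaningful relative to a text encoding: the same position is a different count of UTF-8 bytes, UTF-16 code units, and UTF-32 code points. A short Python sketch of how a caller would compute the offset of a token under each choice:

# Minimal sketch: the offset at which "hello" begins differs per EncodingType,
# which is why the analyze requests above carry an encoding_type field.
text = "\U0001F600 hello"             # an emoji, a space, then "hello"
prefix = text[:2]                     # everything before "hello"

utf8_offset = len(prefix.encode("utf-8"))             # EncodingType.UTF8  -> 5
utf16_offset = len(prefix.encode("utf-16-le")) // 2   # EncodingType.UTF16 -> 3
utf32_offset = len(prefix)                            # EncodingType.UTF32 -> 2
print(utf8_offset, utf16_offset, utf32_offset)
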
diff --git a/third_party/googleapis/google/cloud/language/v1beta1/language_gapic.yaml b/third_party/googleapis/google/cloud/language/v1beta1/language_gapic.yaml
new file mode 100644
index 0000000000..7b7d83622e
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/v1beta1/language_gapic.yaml
@@ -0,0 +1,84 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.language.spi.v1beta1
+ python:
+ package_name: google.cloud.gapic.language.v1beta1
+ go:
+ package_name: cloud.google.com/go/language/apiv1beta1
+ csharp:
+ package_name: Google.Cloud.Language.V1Beta1
+ ruby:
+ package_name: Google::Cloud::Language::V1beta1
+ php:
+ package_name: Google\Cloud\Language\V1Beta1
+ nodejs:
+ package_name: language.v1beta1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.cloud.language.v1beta1.LanguageService
+ smoke_test:
+ method: AnalyzeSentiment
+ init_fields:
+ - document.type=PLAIN_TEXT
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: AnalyzeSentiment
+ flattening:
+ groups:
+ - parameters:
+ - document
+ required_fields:
+ - document
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnalyzeEntities
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - encoding_type
+ required_fields:
+ - document
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnnotateText
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - features
+ - encoding_type
+ required_fields:
+ - document
+ - features
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
diff --git a/third_party/googleapis/google/cloud/language/v1beta1/language_service.proto b/third_party/googleapis/google/cloud/language/v1beta1/language_service.proto
new file mode 100644
index 0000000000..27212ac726
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/v1beta1/language_service.proto
@@ -0,0 +1,950 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.language.v1beta1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/language/v1beta1;language";
+option java_multiple_files = true;
+option java_outer_classname = "LanguageServiceProto";
+option java_package = "com.google.cloud.language.v1beta1";
+
+
+// Provides text analysis operations such as sentiment analysis and entity
+// recognition.
+service LanguageService {
+ // Analyzes the sentiment of the provided text.
+ rpc AnalyzeSentiment(AnalyzeSentimentRequest) returns (AnalyzeSentimentResponse) {
+ option (google.api.http) = { post: "/v1beta1/documents:analyzeSentiment" body: "*" };
+ }
+
+ // Finds named entities (currently proper names and common nouns) in the text
+ // along with entity types, salience, mentions for each entity, and
+ // other properties.
+ rpc AnalyzeEntities(AnalyzeEntitiesRequest) returns (AnalyzeEntitiesResponse) {
+ option (google.api.http) = { post: "/v1beta1/documents:analyzeEntities" body: "*" };
+ }
+
+ // Analyzes the syntax of the text and provides sentence boundaries and
+ // tokenization along with part of speech tags, dependency trees, and other
+ // properties.
+ rpc AnalyzeSyntax(AnalyzeSyntaxRequest) returns (AnalyzeSyntaxResponse) {
+ option (google.api.http) = { post: "/v1beta1/documents:analyzeSyntax" body: "*" };
+ }
+
+ // A convenience method that provides all the features that analyzeSentiment,
+ // analyzeEntities, and analyzeSyntax provide in one call.
+ rpc AnnotateText(AnnotateTextRequest) returns (AnnotateTextResponse) {
+ option (google.api.http) = { post: "/v1beta1/documents:annotateText" body: "*" };
+ }
+}
+
+// ################################################################ #
+//
+// Represents the input to API methods.
+message Document {
+ // The document types enum.
+ enum Type {
+ // The content type is not specified.
+ TYPE_UNSPECIFIED = 0;
+
+ // Plain text
+ PLAIN_TEXT = 1;
+
+ // HTML
+ HTML = 2;
+ }
+
+ // Required. If the type is not set or is `TYPE_UNSPECIFIED`,
+ // returns an `INVALID_ARGUMENT` error.
+ Type type = 1;
+
+ // The source of the document: a string containing the content or a
+ // Google Cloud Storage URI.
+ oneof source {
+ // The content of the input in string format.
+ string content = 2;
+
+ // The Google Cloud Storage URI where the file content is located.
+ // This URI must be of the form: gs://bucket_name/object_name. For more
+ // details, see https://cloud.google.com/storage/docs/reference-uris.
+ // NOTE: Cloud Storage object versioning is not supported.
+ string gcs_content_uri = 3;
+ }
+
+ // The language of the document (if not specified, the language is
+ // automatically detected). Both ISO and BCP-47 language codes are
+ // accepted.<br>
+ // [Language Support](https://cloud.google.com/natural-language/docs/languages)
+ // lists currently supported languages for each API method.
+ // If the language (either specified by the caller or automatically detected)
+ // is not supported by the called API method, an `INVALID_ARGUMENT` error
+ // is returned.
+ string language = 4;
+}
+
+// Represents a sentence in the input document.
+message Sentence {
+ // The sentence text.
+ TextSpan text = 1;
+
+ // For calls to [AnalyzeSentiment][] or if
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_document_sentiment] is set to
+ // true, this field will contain the sentiment for the sentence.
+ Sentiment sentiment = 2;
+}
+
+// Represents a phrase in the text that is a known entity, such as
+// a person, an organization, or location. The API associates information, such
+// as salience and mentions, with entities.
+message Entity {
+ // The type of the entity.
+ enum Type {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Person
+ PERSON = 1;
+
+ // Location
+ LOCATION = 2;
+
+ // Organization
+ ORGANIZATION = 3;
+
+ // Event
+ EVENT = 4;
+
+ // Work of art
+ WORK_OF_ART = 5;
+
+ // Consumer goods
+ CONSUMER_GOOD = 6;
+
+ // Other types
+ OTHER = 7;
+ }
+
+ // The representative name for the entity.
+ string name = 1;
+
+ // The entity type.
+ Type type = 2;
+
+ // Metadata associated with the entity.
+ //
+ // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
+ // available. The associated keys are "wikipedia_url" and "mid", respectively.
+ map<string, string> metadata = 3;
+
+ // The salience score associated with the entity in the [0, 1.0] range.
+ //
+ // The salience score for an entity provides information about the
+ // importance or centrality of that entity to the entire document text.
+ // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
+ // salient.
+ float salience = 4;
+
+ // The mentions of this entity in the input document. The API currently
+ // supports proper noun mentions.
+ repeated EntityMention mentions = 5;
+}
+
+// Represents the smallest syntactic building block of the text.
+message Token {
+ // The token text.
+ TextSpan text = 1;
+
+ // Parts of speech tag for this token.
+ PartOfSpeech part_of_speech = 2;
+
+ // Dependency tree parse for this token.
+ DependencyEdge dependency_edge = 3;
+
+ // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
+ string lemma = 4;
+}
+
+// Represents the feeling associated with the entire text or entities in
+// the text.
+message Sentiment {
+ // DEPRECATED FIELD - This field is being deprecated in
+ // favor of score. Please refer to our documentation at
+ // https://cloud.google.com/natural-language/docs for more information.
+ float polarity = 1;
+
+ // A non-negative number in the [0, +inf) range, which represents
+ // the absolute magnitude of sentiment regardless of score (positive or
+ // negative).
+ float magnitude = 2;
+
+ // Sentiment score between -1.0 (negative sentiment) and 1.0
+ // (positive sentiment).
+ float score = 3;
+}
+
+// Represents part of speech information for a token.
+message PartOfSpeech {
+ // The part of speech tags enum.
+ enum Tag {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Adjective
+ ADJ = 1;
+
+ // Adposition (preposition and postposition)
+ ADP = 2;
+
+ // Adverb
+ ADV = 3;
+
+ // Conjunction
+ CONJ = 4;
+
+ // Determiner
+ DET = 5;
+
+ // Noun (common and proper)
+ NOUN = 6;
+
+ // Cardinal number
+ NUM = 7;
+
+ // Pronoun
+ PRON = 8;
+
+ // Particle or other function word
+ PRT = 9;
+
+ // Punctuation
+ PUNCT = 10;
+
+ // Verb (all tenses and modes)
+ VERB = 11;
+
+ // Other: foreign words, typos, abbreviations
+ X = 12;
+
+ // Affix
+ AFFIX = 13;
+ }
+
+ // The characteristic of a verb that expresses time flow during an event.
+ enum Aspect {
+ // Aspect is not applicable in the analyzed language or is not predicted.
+ ASPECT_UNKNOWN = 0;
+
+ // Perfective
+ PERFECTIVE = 1;
+
+ // Imperfective
+ IMPERFECTIVE = 2;
+
+ // Progressive
+ PROGRESSIVE = 3;
+ }
+
+ // The grammatical function performed by a noun or pronoun in a phrase,
+ // clause, or sentence. In some languages, other parts of speech, such as
+ // adjective and determiner, take case inflection in agreement with the noun.
+ enum Case {
+ // Case is not applicable in the analyzed language or is not predicted.
+ CASE_UNKNOWN = 0;
+
+ // Accusative
+ ACCUSATIVE = 1;
+
+ // Adverbial
+ ADVERBIAL = 2;
+
+ // Complementive
+ COMPLEMENTIVE = 3;
+
+ // Dative
+ DATIVE = 4;
+
+ // Genitive
+ GENITIVE = 5;
+
+ // Instrumental
+ INSTRUMENTAL = 6;
+
+ // Locative
+ LOCATIVE = 7;
+
+ // Nominative
+ NOMINATIVE = 8;
+
+ // Oblique
+ OBLIQUE = 9;
+
+ // Partitive
+ PARTITIVE = 10;
+
+ // Prepositional
+ PREPOSITIONAL = 11;
+
+ // Reflexive
+ REFLEXIVE_CASE = 12;
+
+ // Relative
+ RELATIVE_CASE = 13;
+
+ // Vocative
+ VOCATIVE = 14;
+ }
+
+  // Depending on the language, Form can categorize different forms of
+  // verbs, adjectives, adverbs, etc. For example, it can categorize inflected
+  // endings of verbs and adjectives or distinguish between short and long
+  // forms of adjectives and participles.
+ enum Form {
+ // Form is not applicable in the analyzed language or is not predicted.
+ FORM_UNKNOWN = 0;
+
+ // Adnomial
+ ADNOMIAL = 1;
+
+ // Auxiliary
+ AUXILIARY = 2;
+
+ // Complementizer
+ COMPLEMENTIZER = 3;
+
+ // Final ending
+ FINAL_ENDING = 4;
+
+ // Gerund
+ GERUND = 5;
+
+ // Realis
+ REALIS = 6;
+
+ // Irrealis
+ IRREALIS = 7;
+
+ // Short form
+ SHORT = 8;
+
+ // Long form
+ LONG = 9;
+
+ // Order form
+ ORDER = 10;
+
+ // Specific form
+ SPECIFIC = 11;
+ }
+
+ // Gender classes of nouns reflected in the behaviour of associated words.
+ enum Gender {
+ // Gender is not applicable in the analyzed language or is not predicted.
+ GENDER_UNKNOWN = 0;
+
+ // Feminine
+ FEMININE = 1;
+
+ // Masculine
+ MASCULINE = 2;
+
+ // Neuter
+ NEUTER = 3;
+ }
+
+ // The grammatical feature of verbs, used for showing modality and attitude.
+ enum Mood {
+ // Mood is not applicable in the analyzed language or is not predicted.
+ MOOD_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_MOOD = 1;
+
+ // Imperative
+ IMPERATIVE = 2;
+
+ // Indicative
+ INDICATIVE = 3;
+
+ // Interrogative
+ INTERROGATIVE = 4;
+
+ // Jussive
+ JUSSIVE = 5;
+
+ // Subjunctive
+ SUBJUNCTIVE = 6;
+ }
+
+ // Count distinctions.
+ enum Number {
+ // Number is not applicable in the analyzed language or is not predicted.
+ NUMBER_UNKNOWN = 0;
+
+ // Singular
+ SINGULAR = 1;
+
+ // Plural
+ PLURAL = 2;
+
+ // Dual
+ DUAL = 3;
+ }
+
+ // The distinction between the speaker, second person, third person, etc.
+ enum Person {
+ // Person is not applicable in the analyzed language or is not predicted.
+ PERSON_UNKNOWN = 0;
+
+ // First
+ FIRST = 1;
+
+ // Second
+ SECOND = 2;
+
+ // Third
+ THIRD = 3;
+
+ // Reflexive
+ REFLEXIVE_PERSON = 4;
+ }
+
+ // This category shows if the token is part of a proper name.
+ enum Proper {
+ // Proper is not applicable in the analyzed language or is not predicted.
+ PROPER_UNKNOWN = 0;
+
+ // Proper
+ PROPER = 1;
+
+ // Not proper
+ NOT_PROPER = 2;
+ }
+
+ // Reciprocal features of a pronoun.
+ enum Reciprocity {
+ // Reciprocity is not applicable in the analyzed language or is not
+ // predicted.
+ RECIPROCITY_UNKNOWN = 0;
+
+ // Reciprocal
+ RECIPROCAL = 1;
+
+ // Non-reciprocal
+ NON_RECIPROCAL = 2;
+ }
+
+ // Time reference.
+ enum Tense {
+ // Tense is not applicable in the analyzed language or is not predicted.
+ TENSE_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_TENSE = 1;
+
+ // Future
+ FUTURE = 2;
+
+ // Past
+ PAST = 3;
+
+ // Present
+ PRESENT = 4;
+
+ // Imperfect
+ IMPERFECT = 5;
+
+ // Pluperfect
+ PLUPERFECT = 6;
+ }
+
+ // The relationship between the action that a verb expresses and the
+ // participants identified by its arguments.
+ enum Voice {
+ // Voice is not applicable in the analyzed language or is not predicted.
+ VOICE_UNKNOWN = 0;
+
+ // Active
+ ACTIVE = 1;
+
+ // Causative
+ CAUSATIVE = 2;
+
+ // Passive
+ PASSIVE = 3;
+ }
+
+ // The part of speech tag.
+ Tag tag = 1;
+
+ // The grammatical aspect.
+ Aspect aspect = 2;
+
+ // The grammatical case.
+ Case case = 3;
+
+ // The grammatical form.
+ Form form = 4;
+
+ // The grammatical gender.
+ Gender gender = 5;
+
+ // The grammatical mood.
+ Mood mood = 6;
+
+ // The grammatical number.
+ Number number = 7;
+
+ // The grammatical person.
+ Person person = 8;
+
+ // The grammatical properness.
+ Proper proper = 9;
+
+ // The grammatical reciprocity.
+ Reciprocity reciprocity = 10;
+
+ // The grammatical tense.
+ Tense tense = 11;
+
+ // The grammatical voice.
+ Voice voice = 12;
+}
+
+// Represents dependency parse tree information for a token.
+message DependencyEdge {
+ // The parse label enum for the token.
+ enum Label {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Abbreviation modifier
+ ABBREV = 1;
+
+ // Adjectival complement
+ ACOMP = 2;
+
+ // Adverbial clause modifier
+ ADVCL = 3;
+
+ // Adverbial modifier
+ ADVMOD = 4;
+
+ // Adjectival modifier of an NP
+ AMOD = 5;
+
+ // Appositional modifier of an NP
+ APPOS = 6;
+
+ // Attribute dependent of a copular verb
+ ATTR = 7;
+
+ // Auxiliary (non-main) verb
+ AUX = 8;
+
+ // Passive auxiliary
+ AUXPASS = 9;
+
+ // Coordinating conjunction
+ CC = 10;
+
+ // Clausal complement of a verb or adjective
+ CCOMP = 11;
+
+ // Conjunct
+ CONJ = 12;
+
+ // Clausal subject
+ CSUBJ = 13;
+
+ // Clausal passive subject
+ CSUBJPASS = 14;
+
+ // Dependency (unable to determine)
+ DEP = 15;
+
+ // Determiner
+ DET = 16;
+
+ // Discourse
+ DISCOURSE = 17;
+
+ // Direct object
+ DOBJ = 18;
+
+ // Expletive
+ EXPL = 19;
+
+ // Goes with (part of a word in a text not well edited)
+ GOESWITH = 20;
+
+ // Indirect object
+ IOBJ = 21;
+
+ // Marker (word introducing a subordinate clause)
+ MARK = 22;
+
+ // Multi-word expression
+ MWE = 23;
+
+ // Multi-word verbal expression
+ MWV = 24;
+
+ // Negation modifier
+ NEG = 25;
+
+ // Noun compound modifier
+ NN = 26;
+
+ // Noun phrase used as an adverbial modifier
+ NPADVMOD = 27;
+
+ // Nominal subject
+ NSUBJ = 28;
+
+ // Passive nominal subject
+ NSUBJPASS = 29;
+
+ // Numeric modifier of a noun
+ NUM = 30;
+
+ // Element of compound number
+ NUMBER = 31;
+
+ // Punctuation mark
+ P = 32;
+
+ // Parataxis relation
+ PARATAXIS = 33;
+
+ // Participial modifier
+ PARTMOD = 34;
+
+ // The complement of a preposition is a clause
+ PCOMP = 35;
+
+ // Object of a preposition
+ POBJ = 36;
+
+ // Possession modifier
+ POSS = 37;
+
+ // Postverbal negative particle
+ POSTNEG = 38;
+
+ // Predicate complement
+ PRECOMP = 39;
+
+    // Preconjunct
+ PRECONJ = 40;
+
+ // Predeterminer
+ PREDET = 41;
+
+ // Prefix
+ PREF = 42;
+
+ // Prepositional modifier
+ PREP = 43;
+
+ // The relationship between a verb and verbal morpheme
+ PRONL = 44;
+
+ // Particle
+ PRT = 45;
+
+ // Associative or possessive marker
+ PS = 46;
+
+ // Quantifier phrase modifier
+ QUANTMOD = 47;
+
+ // Relative clause modifier
+ RCMOD = 48;
+
+ // Complementizer in relative clause
+ RCMODREL = 49;
+
+ // Ellipsis without a preceding predicate
+ RDROP = 50;
+
+ // Referent
+ REF = 51;
+
+ // Remnant
+ REMNANT = 52;
+
+ // Reparandum
+ REPARANDUM = 53;
+
+ // Root
+ ROOT = 54;
+
+ // Suffix specifying a unit of number
+ SNUM = 55;
+
+ // Suffix
+ SUFF = 56;
+
+ // Temporal modifier
+ TMOD = 57;
+
+ // Topic marker
+ TOPIC = 58;
+
+ // Clause headed by an infinite form of the verb that modifies a noun
+ VMOD = 59;
+
+ // Vocative
+ VOCATIVE = 60;
+
+ // Open clausal complement
+ XCOMP = 61;
+
+ // Name suffix
+ SUFFIX = 62;
+
+ // Name title
+ TITLE = 63;
+
+ // Adverbial phrase modifier
+ ADVPHMOD = 64;
+
+ // Causative auxiliary
+ AUXCAUS = 65;
+
+ // Helper auxiliary
+ AUXVV = 66;
+
+ // Rentaishi (Prenominal modifier)
+ DTMOD = 67;
+
+ // Foreign words
+ FOREIGN = 68;
+
+ // Keyword
+ KW = 69;
+
+ // List for chains of comparable items
+ LIST = 70;
+
+ // Nominalized clause
+ NOMC = 71;
+
+ // Nominalized clausal subject
+ NOMCSUBJ = 72;
+
+ // Nominalized clausal passive
+ NOMCSUBJPASS = 73;
+
+ // Compound of numeric modifier
+ NUMC = 74;
+
+ // Copula
+ COP = 75;
+
+ // Dislocated relation (for fronted/topicalized elements)
+ DISLOCATED = 76;
+ }
+
+ // Represents the head of this token in the dependency tree.
+ // This is the index of the token which has an arc going to this token.
+ // The index is the position of the token in the array of tokens returned
+ // by the API method. If this token is a root token, then the
+ // `head_token_index` is its own index.
+ int32 head_token_index = 1;
+
+ // The parse label for the token.
+ Label label = 2;
+}
+
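Because each token stores only the index of its head, the dependency tree is implicit: to walk it, group tokens by head_token_index and treat a token whose head is its own index as the root. A small sketch over the repeated Token list of an AnalyzeSyntax or AnnotateText response:

from collections import defaultdict

def build_dependency_tree(tokens):
    # Returns (roots, children): children[i] lists the indices of tokens whose
    # DependencyEdge points at token i; roots are tokens pointing at themselves.
    children = defaultdict(list)
    roots = []
    for index, token in enumerate(tokens):
        head = token.dependency_edge.head_token_index
        if head == index:
            roots.append(index)
        else:
            children[head].append(index)
    return roots, children
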
+// Represents a mention for an entity in the text. Currently, proper noun
+// mentions are supported.
+message EntityMention {
+ // The supported types of mentions.
+ enum Type {
+ // Unknown
+ TYPE_UNKNOWN = 0;
+
+ // Proper name
+ PROPER = 1;
+
+ // Common noun (or noun compound)
+ COMMON = 2;
+ }
+
+ // The mention text.
+ TextSpan text = 1;
+
+ // The type of the entity mention.
+ Type type = 2;
+}
+
+// Represents an output piece of text.
+message TextSpan {
+ // The content of the output text.
+ string content = 1;
+
+ // The API calculates the beginning offset of the content in the original
+ // document according to the [EncodingType][google.cloud.language.v1beta1.EncodingType] specified in the API request.
+ int32 begin_offset = 2;
+}
+
+// The sentiment analysis request message.
+message AnalyzeSentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate sentence offsets for the
+ // sentence sentiment.
+ EncodingType encoding_type = 2;
+}
+
+// The sentiment analysis response message.
+message AnalyzeSentimentResponse {
+ // The overall sentiment of the input document.
+ Sentiment document_sentiment = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
+ string language = 2;
+
+ // The sentiment for all the sentences in the document.
+ repeated Sentence sentences = 3;
+}
+
+// The entity analysis request message.
+message AnalyzeEntitiesRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity analysis response message.
+message AnalyzeEntitiesResponse {
+ // The recognized entities in the input document.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
+ string language = 2;
+}
+
+// The syntax analysis request message.
+message AnalyzeSyntaxRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The syntax analysis response message.
+message AnalyzeSyntaxResponse {
+ // Sentences in the input document.
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ repeated Token tokens = 2;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
+ string language = 3;
+}
+
+// The request message for the text annotation API, which can perform multiple
+// analysis types (sentiment, entities, and syntax) in one call.
+message AnnotateTextRequest {
+ // All available features for sentiment, syntax, and semantic analysis.
+ // Setting each one to true will enable that specific analysis for the input.
+ message Features {
+ // Extract syntax information.
+ bool extract_syntax = 1;
+
+ // Extract entities.
+ bool extract_entities = 2;
+
+ // Extract document-level sentiment.
+ bool extract_document_sentiment = 3;
+ }
+
+ // Input document.
+ Document document = 1;
+
+ // The enabled features.
+ Features features = 2;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 3;
+}
+
+// The text annotations response message.
+message AnnotateTextResponse {
+ // Sentences in the input document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_syntax].
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_syntax].
+ repeated Token tokens = 2;
+
+ // Entities, along with their semantic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_entities].
+ repeated Entity entities = 3;
+
+ // The overall sentiment for the document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_document_sentiment].
+ Sentiment document_sentiment = 4;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
+ string language = 5;
+}
+
+// Represents the text encoding that the caller uses to process the output.
+// Providing an `EncodingType` is recommended because the API provides the
+// beginning offsets for various outputs, such as tokens and mentions, and
+// languages that natively use different text encodings may access offsets
+// differently.
+enum EncodingType {
+ // If `EncodingType` is not specified, encoding-dependent information (such as
+ // `begin_offset`) will be set at `-1`.
+ NONE = 0;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-8 encoding of the input. C++ and Go are examples of languages
+ // that use this encoding natively.
+ UTF8 = 1;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+  // on the UTF-16 encoding of the input. Java and JavaScript are examples of
+ // languages that use this encoding natively.
+ UTF16 = 2;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-32 encoding of the input. Python is an example of a language
+ // that uses this encoding natively.
+ UTF32 = 3;
+}
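The EncodingType only changes the unit in which begin_offset is counted: UTF-8 offsets diverge as soon as the text contains non-ASCII characters, and UTF-16 additionally diverges for characters outside the Basic Multilingual Plane. The arithmetic, illustrated without any API call:

def begin_offsets(text, char_index):
    # Offset of text[char_index] counted in UTF-8 bytes, UTF-16 code units,
    # and UTF-32 code units (Unicode code points).
    prefix = text[:char_index]
    return {
        "UTF8": len(prefix.encode("utf-8")),
        "UTF16": len(prefix.encode("utf-16-le")) // 2,
        "UTF32": len(prefix),  # Python 3 strings index by code point
    }

# U+1F642 takes 4 UTF-8 bytes, 2 UTF-16 code units, and 1 code point.
print(begin_offsets("a\U0001F642b", 2))  # {'UTF8': 5, 'UTF16': 3, 'UTF32': 2}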
diff --git a/third_party/googleapis/google/cloud/language/v1beta2/language_gapic.yaml b/third_party/googleapis/google/cloud/language/v1beta2/language_gapic.yaml
new file mode 100644
index 0000000000..fc9924f014
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/v1beta2/language_gapic.yaml
@@ -0,0 +1,106 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.language.spi.v1beta2
+ python:
+ package_name: google.cloud.gapic.language.v1beta2
+ go:
+ package_name: cloud.google.com/go/language/apiv1beta2
+ csharp:
+ package_name: Google.Cloud.Language.V1beta2
+ ruby:
+ package_name: Google::Cloud::Language::V1beta2
+ php:
+ package_name: Google\Cloud\Language\V1beta2
+ nodejs:
+ package_name: language.v1beta2
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.cloud.language.v1beta2.LanguageService
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: AnalyzeSentiment
+ flattening:
+ groups:
+ - parameters:
+ - document
+ required_fields:
+ - document
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnalyzeEntities
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - encoding_type
+ required_fields:
+ - document
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnalyzeEntitySentiment
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - encoding_type
+ required_fields:
+ - document
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnalyzeSyntax
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - encoding_type
+ required_fields:
+ - document
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ - name: AnnotateText
+ flattening:
+ groups:
+ - parameters:
+ - document
+ - features
+ - encoding_type
+ required_fields:
+ - document
+ - features
+ - encoding_type
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
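
The default retry_params block describes a standard exponential backoff: the first retry waits 100 ms, each subsequent delay grows by a factor of 1.3 up to a 60 s cap, and the call chain is abandoned once total_timeout_millis (10 minutes) is exhausted. A simplified sketch of that schedule (real GAPIC clients also add jitter and count the RPC time itself against the total timeout):

def backoff_schedule(initial_delay=0.1, multiplier=1.3, max_delay=60.0,
                     total_timeout=600.0):
    # Yields (attempt, delay_seconds) pairs until the accumulated delay
    # would exceed the total timeout from retry_params_def above.
    delay, elapsed, attempt = initial_delay, 0.0, 1
    while elapsed + delay <= total_timeout:
        yield attempt, delay
        elapsed += delay
        delay = min(delay * multiplier, max_delay)
        attempt += 1

for attempt, delay in backoff_schedule():
    if attempt > 5:
        break
    print("attempt %d: wait %.3fs before retrying" % (attempt, delay))
# attempt 1: 0.100s, attempt 2: 0.130s, attempt 3: 0.169s, ...
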
diff --git a/third_party/googleapis/google/cloud/language/v1beta2/language_service.proto b/third_party/googleapis/google/cloud/language/v1beta2/language_service.proto
new file mode 100644
index 0000000000..f79d07055a
--- /dev/null
+++ b/third_party/googleapis/google/cloud/language/v1beta2/language_service.proto
@@ -0,0 +1,989 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.language.v1beta2;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/language/v1beta2;language";
+option java_multiple_files = true;
+option java_outer_classname = "LanguageServiceProto";
+option java_package = "com.google.cloud.language.v1beta2";
+
+
+// Provides text analysis operations such as sentiment analysis and entity
+// recognition.
+service LanguageService {
+ // Analyzes the sentiment of the provided text.
+ rpc AnalyzeSentiment(AnalyzeSentimentRequest) returns (AnalyzeSentimentResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeSentiment" body: "*" };
+ }
+
+ // Finds named entities (currently proper names and common nouns) in the text
+ // along with entity types, salience, mentions for each entity, and
+ // other properties.
+ rpc AnalyzeEntities(AnalyzeEntitiesRequest) returns (AnalyzeEntitiesResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeEntities" body: "*" };
+ }
+
+ // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes
+ // sentiment associated with each entity and its mentions.
+ rpc AnalyzeEntitySentiment(AnalyzeEntitySentimentRequest) returns (AnalyzeEntitySentimentResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeEntitySentiment" body: "*" };
+ }
+
+ // Analyzes the syntax of the text and provides sentence boundaries and
+ // tokenization along with part of speech tags, dependency trees, and other
+ // properties.
+ rpc AnalyzeSyntax(AnalyzeSyntaxRequest) returns (AnalyzeSyntaxResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeSyntax" body: "*" };
+ }
+
+ // A convenience method that provides all syntax, sentiment, and entity
+ // features in one call.
+ rpc AnnotateText(AnnotateTextRequest) returns (AnnotateTextResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:annotateText" body: "*" };
+ }
+}
+
+// ################################################################ #
+//
+// Represents the input to API methods.
+message Document {
+ // The document types enum.
+ enum Type {
+ // The content type is not specified.
+ TYPE_UNSPECIFIED = 0;
+
+ // Plain text
+ PLAIN_TEXT = 1;
+
+ // HTML
+ HTML = 2;
+ }
+
+ // Required. If the type is not set or is `TYPE_UNSPECIFIED`,
+ // returns an `INVALID_ARGUMENT` error.
+ Type type = 1;
+
+ // The source of the document: a string containing the content or a
+ // Google Cloud Storage URI.
+ oneof source {
+ // The content of the input in string format.
+ string content = 2;
+
+ // The Google Cloud Storage URI where the file content is located.
+ // This URI must be of the form: gs://bucket_name/object_name. For more
+ // details, see https://cloud.google.com/storage/docs/reference-uris.
+ // NOTE: Cloud Storage object versioning is not supported.
+ string gcs_content_uri = 3;
+ }
+
+ // The language of the document (if not specified, the language is
+ // automatically detected). Both ISO and BCP-47 language codes are
+ // accepted.<br>
+ // [Language Support](https://cloud.google.com/natural-language/docs/languages)
+ // lists currently supported languages for each API method.
+ // If the language (either specified by the caller or automatically detected)
+ // is not supported by the called API method, an `INVALID_ARGUMENT` error
+ // is returned.
+ string language = 4;
+}
+
+// Represents a sentence in the input document.
+message Sentence {
+ // The sentence text.
+ TextSpan text = 1;
+
+ // For calls to [AnalyzeSentiment][] or if
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment] is set to
+ // true, this field will contain the sentiment for the sentence.
+ Sentiment sentiment = 2;
+}
+
+// Represents a phrase in the text that is a known entity, such as
+// a person, an organization, or location. The API associates information, such
+// as salience and mentions, with entities.
+message Entity {
+ // The type of the entity.
+ enum Type {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Person
+ PERSON = 1;
+
+ // Location
+ LOCATION = 2;
+
+ // Organization
+ ORGANIZATION = 3;
+
+ // Event
+ EVENT = 4;
+
+ // Work of art
+ WORK_OF_ART = 5;
+
+ // Consumer goods
+ CONSUMER_GOOD = 6;
+
+ // Other types
+ OTHER = 7;
+ }
+
+ // The representative name for the entity.
+ string name = 1;
+
+ // The entity type.
+ Type type = 2;
+
+ // Metadata associated with the entity.
+ //
+ // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
+ // available. The associated keys are "wikipedia_url" and "mid", respectively.
+ map<string, string> metadata = 3;
+
+ // The salience score associated with the entity in the [0, 1.0] range.
+ //
+ // The salience score for an entity provides information about the
+ // importance or centrality of that entity to the entire document text.
+ // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
+ // salient.
+ float salience = 4;
+
+ // The mentions of this entity in the input document. The API currently
+ // supports proper noun mentions.
+ repeated EntityMention mentions = 5;
+
+ // For calls to [AnalyzeEntitySentiment][] or if
+ // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
+ // true, this field will contain the aggregate sentiment expressed for this
+ // entity in the provided document.
+ Sentiment sentiment = 6;
+}
+
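The sentiment field is what v1beta2 adds on top of v1beta1: it carries the aggregate sentiment expressed toward the entity across the whole document, while each EntityMention (defined further down) carries the sentiment of that individual mention. A sketch of reading both out of an AnalyzeEntitySentimentResponse, assuming the protoc-generated module is importable as language_service_pb2:

from language_service_pb2 import Entity

def summarize_entity_sentiment(response):
    # `response` is an AnalyzeEntitySentimentResponse from the RPC above.
    for entity in response.entities:
        kind = Entity.Type.Name(entity.type)
        print("%s [%s] salience=%.2f sentiment=%+.2f (magnitude %.2f)"
              % (entity.name, kind, entity.salience,
                 entity.sentiment.score, entity.sentiment.magnitude))
        for mention in entity.mentions:
            print("  mention %r at offset %d: %+.2f"
                  % (mention.text.content, mention.text.begin_offset,
                     mention.sentiment.score))
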
+// Represents the smallest syntactic building block of the text.
+message Token {
+ // The token text.
+ TextSpan text = 1;
+
+ // Parts of speech tag for this token.
+ PartOfSpeech part_of_speech = 2;
+
+ // Dependency tree parse for this token.
+ DependencyEdge dependency_edge = 3;
+
+ // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
+ string lemma = 4;
+}
+
+// Represents the feeling associated with the entire text or entities in
+// the text.
+message Sentiment {
+ // A non-negative number in the [0, +inf) range, which represents
+ // the absolute magnitude of sentiment regardless of score (positive or
+ // negative).
+ float magnitude = 2;
+
+ // Sentiment score between -1.0 (negative sentiment) and 1.0
+ // (positive sentiment).
+ float score = 3;
+}
+
+// Represents part of speech information for a token.
+message PartOfSpeech {
+ // The part of speech tags enum.
+ enum Tag {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Adjective
+ ADJ = 1;
+
+ // Adposition (preposition and postposition)
+ ADP = 2;
+
+ // Adverb
+ ADV = 3;
+
+ // Conjunction
+ CONJ = 4;
+
+ // Determiner
+ DET = 5;
+
+ // Noun (common and proper)
+ NOUN = 6;
+
+ // Cardinal number
+ NUM = 7;
+
+ // Pronoun
+ PRON = 8;
+
+ // Particle or other function word
+ PRT = 9;
+
+ // Punctuation
+ PUNCT = 10;
+
+ // Verb (all tenses and modes)
+ VERB = 11;
+
+ // Other: foreign words, typos, abbreviations
+ X = 12;
+
+ // Affix
+ AFFIX = 13;
+ }
+
+ // The characteristic of a verb that expresses time flow during an event.
+ enum Aspect {
+ // Aspect is not applicable in the analyzed language or is not predicted.
+ ASPECT_UNKNOWN = 0;
+
+ // Perfective
+ PERFECTIVE = 1;
+
+ // Imperfective
+ IMPERFECTIVE = 2;
+
+ // Progressive
+ PROGRESSIVE = 3;
+ }
+
+ // The grammatical function performed by a noun or pronoun in a phrase,
+ // clause, or sentence. In some languages, other parts of speech, such as
+ // adjective and determiner, take case inflection in agreement with the noun.
+ enum Case {
+ // Case is not applicable in the analyzed language or is not predicted.
+ CASE_UNKNOWN = 0;
+
+ // Accusative
+ ACCUSATIVE = 1;
+
+ // Adverbial
+ ADVERBIAL = 2;
+
+ // Complementive
+ COMPLEMENTIVE = 3;
+
+ // Dative
+ DATIVE = 4;
+
+ // Genitive
+ GENITIVE = 5;
+
+ // Instrumental
+ INSTRUMENTAL = 6;
+
+ // Locative
+ LOCATIVE = 7;
+
+ // Nominative
+ NOMINATIVE = 8;
+
+ // Oblique
+ OBLIQUE = 9;
+
+ // Partitive
+ PARTITIVE = 10;
+
+ // Prepositional
+ PREPOSITIONAL = 11;
+
+ // Reflexive
+ REFLEXIVE_CASE = 12;
+
+ // Relative
+ RELATIVE_CASE = 13;
+
+ // Vocative
+ VOCATIVE = 14;
+ }
+
+  // Depending on the language, Form can categorize different forms of
+  // verbs, adjectives, adverbs, etc. For example, it can categorize inflected
+  // endings of verbs and adjectives or distinguish between short and long
+  // forms of adjectives and participles.
+ enum Form {
+ // Form is not applicable in the analyzed language or is not predicted.
+ FORM_UNKNOWN = 0;
+
+ // Adnomial
+ ADNOMIAL = 1;
+
+ // Auxiliary
+ AUXILIARY = 2;
+
+ // Complementizer
+ COMPLEMENTIZER = 3;
+
+ // Final ending
+ FINAL_ENDING = 4;
+
+ // Gerund
+ GERUND = 5;
+
+ // Realis
+ REALIS = 6;
+
+ // Irrealis
+ IRREALIS = 7;
+
+ // Short form
+ SHORT = 8;
+
+ // Long form
+ LONG = 9;
+
+ // Order form
+ ORDER = 10;
+
+ // Specific form
+ SPECIFIC = 11;
+ }
+
+ // Gender classes of nouns reflected in the behaviour of associated words.
+ enum Gender {
+ // Gender is not applicable in the analyzed language or is not predicted.
+ GENDER_UNKNOWN = 0;
+
+ // Feminine
+ FEMININE = 1;
+
+ // Masculine
+ MASCULINE = 2;
+
+ // Neuter
+ NEUTER = 3;
+ }
+
+ // The grammatical feature of verbs, used for showing modality and attitude.
+ enum Mood {
+ // Mood is not applicable in the analyzed language or is not predicted.
+ MOOD_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_MOOD = 1;
+
+ // Imperative
+ IMPERATIVE = 2;
+
+ // Indicative
+ INDICATIVE = 3;
+
+ // Interrogative
+ INTERROGATIVE = 4;
+
+ // Jussive
+ JUSSIVE = 5;
+
+ // Subjunctive
+ SUBJUNCTIVE = 6;
+ }
+
+ // Count distinctions.
+ enum Number {
+ // Number is not applicable in the analyzed language or is not predicted.
+ NUMBER_UNKNOWN = 0;
+
+ // Singular
+ SINGULAR = 1;
+
+ // Plural
+ PLURAL = 2;
+
+ // Dual
+ DUAL = 3;
+ }
+
+ // The distinction between the speaker, second person, third person, etc.
+ enum Person {
+ // Person is not applicable in the analyzed language or is not predicted.
+ PERSON_UNKNOWN = 0;
+
+ // First
+ FIRST = 1;
+
+ // Second
+ SECOND = 2;
+
+ // Third
+ THIRD = 3;
+
+ // Reflexive
+ REFLEXIVE_PERSON = 4;
+ }
+
+ // This category shows if the token is part of a proper name.
+ enum Proper {
+ // Proper is not applicable in the analyzed language or is not predicted.
+ PROPER_UNKNOWN = 0;
+
+ // Proper
+ PROPER = 1;
+
+ // Not proper
+ NOT_PROPER = 2;
+ }
+
+ // Reciprocal features of a pronoun.
+ enum Reciprocity {
+ // Reciprocity is not applicable in the analyzed language or is not
+ // predicted.
+ RECIPROCITY_UNKNOWN = 0;
+
+ // Reciprocal
+ RECIPROCAL = 1;
+
+ // Non-reciprocal
+ NON_RECIPROCAL = 2;
+ }
+
+ // Time reference.
+ enum Tense {
+ // Tense is not applicable in the analyzed language or is not predicted.
+ TENSE_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_TENSE = 1;
+
+ // Future
+ FUTURE = 2;
+
+ // Past
+ PAST = 3;
+
+ // Present
+ PRESENT = 4;
+
+ // Imperfect
+ IMPERFECT = 5;
+
+ // Pluperfect
+ PLUPERFECT = 6;
+ }
+
+ // The relationship between the action that a verb expresses and the
+ // participants identified by its arguments.
+ enum Voice {
+ // Voice is not applicable in the analyzed language or is not predicted.
+ VOICE_UNKNOWN = 0;
+
+ // Active
+ ACTIVE = 1;
+
+ // Causative
+ CAUSATIVE = 2;
+
+ // Passive
+ PASSIVE = 3;
+ }
+
+ // The part of speech tag.
+ Tag tag = 1;
+
+ // The grammatical aspect.
+ Aspect aspect = 2;
+
+ // The grammatical case.
+ Case case = 3;
+
+ // The grammatical form.
+ Form form = 4;
+
+ // The grammatical gender.
+ Gender gender = 5;
+
+ // The grammatical mood.
+ Mood mood = 6;
+
+ // The grammatical number.
+ Number number = 7;
+
+ // The grammatical person.
+ Person person = 8;
+
+ // The grammatical properness.
+ Proper proper = 9;
+
+ // The grammatical reciprocity.
+ Reciprocity reciprocity = 10;
+
+ // The grammatical tense.
+ Tense tense = 11;
+
+ // The grammatical voice.
+ Voice voice = 12;
+}
+
+// Represents dependency parse tree information for a token.
+message DependencyEdge {
+ // The parse label enum for the token.
+ enum Label {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Abbreviation modifier
+ ABBREV = 1;
+
+ // Adjectival complement
+ ACOMP = 2;
+
+ // Adverbial clause modifier
+ ADVCL = 3;
+
+ // Adverbial modifier
+ ADVMOD = 4;
+
+ // Adjectival modifier of an NP
+ AMOD = 5;
+
+ // Appositional modifier of an NP
+ APPOS = 6;
+
+ // Attribute dependent of a copular verb
+ ATTR = 7;
+
+ // Auxiliary (non-main) verb
+ AUX = 8;
+
+ // Passive auxiliary
+ AUXPASS = 9;
+
+ // Coordinating conjunction
+ CC = 10;
+
+ // Clausal complement of a verb or adjective
+ CCOMP = 11;
+
+ // Conjunct
+ CONJ = 12;
+
+ // Clausal subject
+ CSUBJ = 13;
+
+ // Clausal passive subject
+ CSUBJPASS = 14;
+
+ // Dependency (unable to determine)
+ DEP = 15;
+
+ // Determiner
+ DET = 16;
+
+ // Discourse
+ DISCOURSE = 17;
+
+ // Direct object
+ DOBJ = 18;
+
+ // Expletive
+ EXPL = 19;
+
+ // Goes with (part of a word in a text not well edited)
+ GOESWITH = 20;
+
+ // Indirect object
+ IOBJ = 21;
+
+ // Marker (word introducing a subordinate clause)
+ MARK = 22;
+
+ // Multi-word expression
+ MWE = 23;
+
+ // Multi-word verbal expression
+ MWV = 24;
+
+ // Negation modifier
+ NEG = 25;
+
+ // Noun compound modifier
+ NN = 26;
+
+ // Noun phrase used as an adverbial modifier
+ NPADVMOD = 27;
+
+ // Nominal subject
+ NSUBJ = 28;
+
+ // Passive nominal subject
+ NSUBJPASS = 29;
+
+ // Numeric modifier of a noun
+ NUM = 30;
+
+ // Element of compound number
+ NUMBER = 31;
+
+ // Punctuation mark
+ P = 32;
+
+ // Parataxis relation
+ PARATAXIS = 33;
+
+ // Participial modifier
+ PARTMOD = 34;
+
+ // The complement of a preposition is a clause
+ PCOMP = 35;
+
+ // Object of a preposition
+ POBJ = 36;
+
+ // Possession modifier
+ POSS = 37;
+
+ // Postverbal negative particle
+ POSTNEG = 38;
+
+ // Predicate complement
+ PRECOMP = 39;
+
+    // Preconjunct
+ PRECONJ = 40;
+
+ // Predeterminer
+ PREDET = 41;
+
+ // Prefix
+ PREF = 42;
+
+ // Prepositional modifier
+ PREP = 43;
+
+ // The relationship between a verb and verbal morpheme
+ PRONL = 44;
+
+ // Particle
+ PRT = 45;
+
+ // Associative or possessive marker
+ PS = 46;
+
+ // Quantifier phrase modifier
+ QUANTMOD = 47;
+
+ // Relative clause modifier
+ RCMOD = 48;
+
+ // Complementizer in relative clause
+ RCMODREL = 49;
+
+ // Ellipsis without a preceding predicate
+ RDROP = 50;
+
+ // Referent
+ REF = 51;
+
+ // Remnant
+ REMNANT = 52;
+
+ // Reparandum
+ REPARANDUM = 53;
+
+ // Root
+ ROOT = 54;
+
+ // Suffix specifying a unit of number
+ SNUM = 55;
+
+ // Suffix
+ SUFF = 56;
+
+ // Temporal modifier
+ TMOD = 57;
+
+ // Topic marker
+ TOPIC = 58;
+
+ // Clause headed by an infinite form of the verb that modifies a noun
+ VMOD = 59;
+
+ // Vocative
+ VOCATIVE = 60;
+
+ // Open clausal complement
+ XCOMP = 61;
+
+ // Name suffix
+ SUFFIX = 62;
+
+ // Name title
+ TITLE = 63;
+
+ // Adverbial phrase modifier
+ ADVPHMOD = 64;
+
+ // Causative auxiliary
+ AUXCAUS = 65;
+
+ // Helper auxiliary
+ AUXVV = 66;
+
+ // Rentaishi (Prenominal modifier)
+ DTMOD = 67;
+
+ // Foreign words
+ FOREIGN = 68;
+
+ // Keyword
+ KW = 69;
+
+ // List for chains of comparable items
+ LIST = 70;
+
+ // Nominalized clause
+ NOMC = 71;
+
+ // Nominalized clausal subject
+ NOMCSUBJ = 72;
+
+ // Nominalized clausal passive
+ NOMCSUBJPASS = 73;
+
+ // Compound of numeric modifier
+ NUMC = 74;
+
+ // Copula
+ COP = 75;
+
+ // Dislocated relation (for fronted/topicalized elements)
+ DISLOCATED = 76;
+ }
+
+ // Represents the head of this token in the dependency tree.
+ // This is the index of the token which has an arc going to this token.
+ // The index is the position of the token in the array of tokens returned
+ // by the API method. If this token is a root token, then the
+ // `head_token_index` is its own index.
+ int32 head_token_index = 1;
+
+ // The parse label for the token.
+ Label label = 2;
+}
+
+// Represents a mention for an entity in the text. Currently, proper noun
+// mentions are supported.
+message EntityMention {
+ // The supported types of mentions.
+ enum Type {
+ // Unknown
+ TYPE_UNKNOWN = 0;
+
+ // Proper name
+ PROPER = 1;
+
+ // Common noun (or noun compound)
+ COMMON = 2;
+ }
+
+ // The mention text.
+ TextSpan text = 1;
+
+ // The type of the entity mention.
+ Type type = 2;
+
+ // For calls to [AnalyzeEntitySentiment][] or if
+ // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
+ // true, this field will contain the sentiment expressed for this mention of
+ // the entity in the provided document.
+ Sentiment sentiment = 3;
+}
+
+// Represents an output piece of text.
+message TextSpan {
+ // The content of the output text.
+ string content = 1;
+
+ // The API calculates the beginning offset of the content in the original
+ // document according to the [EncodingType][google.cloud.language.v1beta2.EncodingType] specified in the API request.
+ int32 begin_offset = 2;
+}
+
+// The sentiment analysis request message.
+message AnalyzeSentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate sentence offsets for the
+ // sentence sentiment.
+ EncodingType encoding_type = 2;
+}
+
+// The sentiment analysis response message.
+message AnalyzeSentimentResponse {
+ // The overall sentiment of the input document.
+ Sentiment document_sentiment = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 2;
+
+ // The sentiment for all the sentences in the document.
+ repeated Sentence sentences = 3;
+}
+
+// The entity-level sentiment analysis request message.
+message AnalyzeEntitySentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity-level sentiment analysis response message.
+message AnalyzeEntitySentimentResponse {
+ // The recognized entities in the input document with associated sentiments.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 2;
+}
+
+// The entity analysis request message.
+message AnalyzeEntitiesRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity analysis response message.
+message AnalyzeEntitiesResponse {
+ // The recognized entities in the input document.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 2;
+}
+
+// The syntax analysis request message.
+message AnalyzeSyntaxRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The syntax analysis response message.
+message AnalyzeSyntaxResponse {
+ // Sentences in the input document.
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ repeated Token tokens = 2;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 3;
+}
+
+// The request message for the text annotation API, which can perform multiple
+// analysis types (sentiment, entities, and syntax) in one call.
+message AnnotateTextRequest {
+ // All available features for sentiment, syntax, and semantic analysis.
+ // Setting each one to true will enable that specific analysis for the input.
+ message Features {
+ // Extract syntax information.
+ bool extract_syntax = 1;
+
+ // Extract entities.
+ bool extract_entities = 2;
+
+ // Extract document-level sentiment.
+ bool extract_document_sentiment = 3;
+
+ // Extract entities and their associated sentiment.
+ bool extract_entity_sentiment = 4;
+ }
+
+ // Input document.
+ Document document = 1;
+
+ // The enabled features.
+ Features features = 2;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 3;
+}
+
+// The text annotations response message.
+message AnnotateTextResponse {
+ // Sentences in the input document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax].
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax].
+ repeated Token tokens = 2;
+
+ // Entities, along with their semantic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entities].
+ repeated Entity entities = 3;
+
+ // The overall sentiment for the document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment].
+ Sentiment document_sentiment = 4;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 5;
+}
+
+// Represents the text encoding that the caller uses to process the output.
+// Providing an `EncodingType` is recommended because the API provides the
+// beginning offsets for various outputs, such as tokens and mentions, and
+// languages that natively use different text encodings may access offsets
+// differently.
+enum EncodingType {
+ // If `EncodingType` is not specified, encoding-dependent information (such as
+ // `begin_offset`) will be set at `-1`.
+ NONE = 0;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-8 encoding of the input. C++ and Go are examples of languages
+ // that use this encoding natively.
+ UTF8 = 1;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+  // on the UTF-16 encoding of the input. Java and JavaScript are examples of
+ // languages that use this encoding natively.
+ UTF16 = 2;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-32 encoding of the input. Python is an example of a language
+ // that uses this encoding natively.
+ UTF32 = 3;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1/job_service.proto b/third_party/googleapis/google/cloud/ml/v1/job_service.proto
new file mode 100644
index 0000000000..93beed1c07
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1/job_service.proto
@@ -0,0 +1,605 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "JobServiceProto";
+option java_package = "com.google.cloud.ml.api.v1";
+
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the 'job service' to manage training and prediction jobs.
+
+
+
+// Service to create and manage training and batch prediction jobs.
+service JobService {
+ // Creates a training or a batch prediction job.
+ rpc CreateJob(CreateJobRequest) returns (Job) {
+ option (google.api.http) = { post: "/v1/{parent=projects/*}/jobs" body: "job" };
+ }
+
+ // Lists the jobs in the project.
+ rpc ListJobs(ListJobsRequest) returns (ListJobsResponse) {
+ option (google.api.http) = { get: "/v1/{parent=projects/*}/jobs" };
+ }
+
+ // Describes a job.
+ rpc GetJob(GetJobRequest) returns (Job) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/jobs/*}" };
+ }
+
+ // Cancels a running job.
+ rpc CancelJob(CancelJobRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/jobs/*}:cancel" body: "*" };
+ }
+}
+
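The google.api.http options above bind each RPC to a REST path template: {parent=projects/*} captures a project resource name and {name=projects/*/jobs/*} a full job name. A sketch that simply expands those templates into URLs (the ml.googleapis.com host is an assumption; the proto only defines the paths):

def job_service_urls(project, job_id=None, base="https://ml.googleapis.com"):
    # Expands the path templates declared on JobService.
    parent = "projects/%s" % project
    urls = {
        "create": "%s/v1/%s/jobs" % (base, parent),   # POST, body: "job"
        "list": "%s/v1/%s/jobs" % (base, parent),     # GET
    }
    if job_id is not None:
        name = "%s/jobs/%s" % (parent, job_id)
        urls["get"] = "%s/v1/%s" % (base, name)            # GET
        urls["cancel"] = "%s/v1/%s:cancel" % (base, name)  # POST, body: "*"
    return urls

print(job_service_urls("my-project", "train_20170530"))
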
+// Represents input parameters for a training job.
+message TrainingInput {
+ // A scale tier is an abstract representation of the resources Cloud ML
+ // will allocate to a training job. When selecting a scale tier for your
+ // training job, you should consider the size of your training dataset and
+ // the complexity of your model. As the tiers increase, virtual machines are
+ // added to handle your job, and the individual machines in the cluster
+ // generally have more memory and greater processing power than they do at
+ // lower tiers. The number of training units charged per hour of processing
+ // increases as tiers get more advanced. Refer to the
+ // [pricing guide](/ml/pricing) for more details. Note that in addition to
+ // incurring costs, your use of training resources is constrained by the
+ // [quota policy](/ml/quota).
+ enum ScaleTier {
+ // A single worker instance. This tier is suitable for learning how to use
+ // Cloud ML, and for experimenting with new models using small datasets.
+ BASIC = 0;
+
+ // Many workers and a few parameter servers.
+ STANDARD_1 = 1;
+
+ // A large number of workers with many parameter servers.
+ PREMIUM_1 = 3;
+
+ // A single worker instance [with a GPU](ml/docs/how-tos/using-gpus).
+ BASIC_GPU = 6;
+
+ // The CUSTOM tier is not a set tier, but rather enables you to use your
+ // own cluster specification. When you use this tier, set values to
+ // configure your processing cluster according to these guidelines:
+ //
+ // * You _must_ set `TrainingInput.masterType` to specify the type
+ // of machine to use for your master node. This is the only required
+ // setting.
+ //
+ // * You _may_ set `TrainingInput.workerCount` to specify the number of
+ // workers to use. If you specify one or more workers, you _must_ also
+ // set `TrainingInput.workerType` to specify the type of machine to use
+ // for your worker nodes.
+ //
+ // * You _may_ set `TrainingInput.parameterServerCount` to specify the
+ // number of parameter servers to use. If you specify one or more
+ // parameter servers, you _must_ also set
+ // `TrainingInput.parameterServerType` to specify the type of machine to
+ // use for your parameter servers.
+ //
+ // Note that all of your workers must use the same machine type, which can
+ // be different from your parameter server type and master type. Your
+ // parameter servers must likewise use the same machine type, which can be
+ // different from your worker type and master type.
+ CUSTOM = 5;
+ }
+
+  // Required. Specifies the machine types and the number of replicas for
+  // workers and parameter servers.
+ ScaleTier scale_tier = 1;
+
+ // Optional. Specifies the type of virtual machine to use for your training
+ // job's master worker.
+ //
+ // The following types are supported:
+ //
+ // <dl>
+ // <dt>standard</dt>
+ // <dd>
+ // A basic machine configuration suitable for training simple models with
+ // small to moderate datasets.
+ // </dd>
+ // <dt>large_model</dt>
+ // <dd>
+ // A machine with a lot of memory, specially suited for parameter servers
+ // when your model is large (having many hidden layers or layers with very
+ // large numbers of nodes).
+ // </dd>
+ // <dt>complex_model_s</dt>
+ // <dd>
+ // A machine suitable for the master and workers of the cluster when your
+ // model requires more computation than the standard machine can handle
+ // satisfactorily.
+ // </dd>
+ // <dt>complex_model_m</dt>
+ // <dd>
+ // A machine with roughly twice the number of cores and roughly double the
+ // memory of <code suppresswarning="true">complex_model_s</code>.
+ // </dd>
+ // <dt>complex_model_l</dt>
+ // <dd>
+ // A machine with roughly twice the number of cores and roughly double the
+ // memory of <code suppresswarning="true">complex_model_m</code>.
+ // </dd>
+ // <dt>standard_gpu</dt>
+ // <dd>
+ // A machine equivalent to <code suppresswarning="true">standard</code> that
+ // also includes a
+ // <a href="ml/docs/how-tos/using-gpus">
+ // GPU that you can use in your trainer</a>.
+ // </dd>
+ // <dt>complex_model_m_gpu</dt>
+ // <dd>
+ // A machine equivalent to
+  //   <code suppresswarning="true">complex_model_m</code> that also includes
+ // four GPUs.
+ // </dd>
+ // </dl>
+ //
+ // You must set this value when `scaleTier` is set to `CUSTOM`.
+ string master_type = 2;
+
+ // Optional. Specifies the type of virtual machine to use for your training
+ // job's worker nodes.
+ //
+ // The supported values are the same as those described in the entry for
+ // `masterType`.
+ //
+ // This value must be present when `scaleTier` is set to `CUSTOM` and
+ // `workerCount` is greater than zero.
+ string worker_type = 3;
+
+ // Optional. Specifies the type of virtual machine to use for your training
+ // job's parameter server.
+ //
+ // The supported values are the same as those described in the entry for
+ // `master_type`.
+ //
+ // This value must be present when `scaleTier` is set to `CUSTOM` and
+ // `parameter_server_count` is greater than zero.
+ string parameter_server_type = 4;
+
+ // Optional. The number of worker replicas to use for the training job. Each
+ // replica in the cluster will be of the type specified in `worker_type`.
+ //
+ // This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ // set this value, you must also set `worker_type`.
+ int64 worker_count = 5;
+
+ // Optional. The number of parameter server replicas to use for the training
+ // job. Each replica in the cluster will be of the type specified in
+ // `parameter_server_type`.
+ //
+  // This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ // set this value, you must also set `parameter_server_type`.
+ int64 parameter_server_count = 6;
+
+ // Required. The Google Cloud Storage location of the packages with
+ // the training program and any additional dependencies.
+ repeated string package_uris = 7;
+
+ // Required. The Python module name to run after installing the packages.
+ string python_module = 8;
+
+ // Optional. Command line arguments to pass to the program.
+ repeated string args = 10;
+
+ // Optional. The set of Hyperparameters to tune.
+ HyperparameterSpec hyperparameters = 12;
+
+ // Required. The Google Compute Engine region to run the training job in.
+ string region = 14;
+
+ // Optional. A Google Cloud Storage path in which to store training outputs
+ // and other data needed for training. This path is passed to your TensorFlow
+ // program as the 'job_dir' command-line argument. The benefit of specifying
+ // this field is that Cloud ML validates the path for use in training.
+ string job_dir = 16;
+
+ // Optional. The Google Cloud ML runtime version to use for training. If not
+ // set, Google Cloud ML will choose the latest stable version.
+ string runtime_version = 15;
+}
+
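The CUSTOM scale tier puts the cluster shape in the caller's hands, with the constraints spelled out in the ScaleTier comment: master_type becomes mandatory, and worker_type / parameter_server_type are required whenever the matching count is non-zero. A sketch of such a configuration, assuming the protoc-generated module is importable as job_service_pb2; the machine type names come from the master_type documentation above and all concrete values are placeholders:

from job_service_pb2 import TrainingInput

training_input = TrainingInput(
    scale_tier=TrainingInput.CUSTOM,
    master_type="complex_model_m",        # required for CUSTOM
    worker_type="standard_gpu",           # required because worker_count > 0
    worker_count=4,
    parameter_server_type="large_model",  # required because the count > 0
    parameter_server_count=2,
    package_uris=["gs://my-bucket/trainer-0.1.tar.gz"],
    python_module="trainer.task",
    region="us-central1",
    job_dir="gs://my-bucket/output",
)
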
+// Represents a set of hyperparameters to optimize.
+message HyperparameterSpec {
+ // The available types of optimization goals.
+ enum GoalType {
+ // Goal Type will default to maximize.
+ GOAL_TYPE_UNSPECIFIED = 0;
+
+ // Maximize the goal metric.
+ MAXIMIZE = 1;
+
+ // Minimize the goal metric.
+ MINIMIZE = 2;
+ }
+
+ // Required. The type of goal to use for tuning. Available types are
+ // `MAXIMIZE` and `MINIMIZE`.
+ //
+ // Defaults to `MAXIMIZE`.
+ GoalType goal = 1;
+
+ // Required. The set of parameters to tune.
+ repeated ParameterSpec params = 2;
+
+ // Optional. How many training trials should be attempted to optimize
+ // the specified hyperparameters.
+ //
+ // Defaults to one.
+ int32 max_trials = 3;
+
+ // Optional. The number of training trials to run concurrently.
+ // You can reduce the time it takes to perform hyperparameter tuning by adding
+  // trials in parallel. However, each trial only benefits from the information
+ // gained in completed trials. That means that a trial does not get access to
+ // the results of trials running at the same time, which could reduce the
+ // quality of the overall optimization.
+ //
+ // Each trial will use the same scale tier and machine types.
+ //
+ // Defaults to one.
+ int32 max_parallel_trials = 4;
+
+  // Optional. The TensorFlow summary tag name to use for optimizing trials. For
+  // current versions of TensorFlow, this tag name should exactly match what is
+  // shown in TensorBoard, including all scopes. For versions of TensorFlow
+  // prior to 0.12, this should be only the tag passed to tf.Summary.
+ // By default, "training/hptuning/metric" will be used.
+ string hyperparameter_metric_tag = 5;
+}
+
+// Represents a single hyperparameter to optimize.
+message ParameterSpec {
+ // The type of the parameter.
+ enum ParameterType {
+ // You must specify a valid type. Using this unspecified type will result in
+ // an error.
+ PARAMETER_TYPE_UNSPECIFIED = 0;
+
+ // Type for real-valued parameters.
+ DOUBLE = 1;
+
+ // Type for integral parameters.
+ INTEGER = 2;
+
+ // The parameter is categorical, with a value chosen from the categories
+ // field.
+ CATEGORICAL = 3;
+
+ // The parameter is real valued, with a fixed set of feasible points. If
+ // `type==DISCRETE`, feasible_points must be provided, and
+ // {`min_value`, `max_value`} will be ignored.
+ DISCRETE = 4;
+ }
+
+ // The type of scaling that should be applied to this parameter.
+ enum ScaleType {
+ // By default, no scaling is applied.
+ NONE = 0;
+
+ // Scales the feasible space to (0, 1) linearly.
+ UNIT_LINEAR_SCALE = 1;
+
+ // Scales the feasible space logarithmically to (0, 1). The entire feasible
+ // space must be strictly positive.
+ UNIT_LOG_SCALE = 2;
+
+ // Scales the feasible space "reverse" logarithmically to (0, 1). The result
+ // is that values close to the top of the feasible space are spread out more
+ // than points near the bottom. The entire feasible space must be strictly
+ // positive.
+ UNIT_REVERSE_LOG_SCALE = 3;
+ }
+
+ // Required. The parameter name must be unique amongst all ParameterConfigs in
+ // a HyperparameterSpec message. E.g., "learning_rate".
+ string parameter_name = 1;
+
+ // Required. The type of the parameter.
+ ParameterType type = 4;
+
+  // Required if type is `DOUBLE` or `INTEGER`. This field
+  // should be unset if type is `CATEGORICAL`. This value should be an integer
+  // if type is `INTEGER`.
+ double min_value = 2;
+
+  // Required if type is `DOUBLE` or `INTEGER`. This field
+  // should be unset if type is `CATEGORICAL`. This value should be an integer
+  // if type is `INTEGER`.
+ double max_value = 3;
+
+ // Required if type is `CATEGORICAL`. The list of possible categories.
+ repeated string categorical_values = 5;
+
+ // Required if type is `DISCRETE`.
+ // A list of feasible points.
+ // The list should be in strictly increasing order. For instance, this
+ // parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+ // should not contain more than 1,000 values.
+ repeated double discrete_values = 6;
+
+ // Optional. How the parameter should be scaled to the hypercube.
+ // Leave unset for categorical parameters.
+ // Some kind of scaling is strongly recommended for real or integral
+ // parameters (e.g., `UNIT_LINEAR_SCALE`).
+ ScaleType scale_type = 7;
+}
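+
+// A minimal, illustrative sketch of a `HyperparameterSpec` with two tuned
+// parameters, in the JSON representation of the REST API (parameter names and
+// ranges are placeholders chosen for the example):
+// <pre>
+// {
+//   "goal": "MAXIMIZE",
+//   "maxTrials": 10,
+//   "maxParallelTrials": 2,
+//   "params": [
+//     {
+//       "parameterName": "learning_rate",
+//       "type": "DOUBLE",
+//       "minValue": 0.0001,
+//       "maxValue": 0.1,
+//       "scaleType": "UNIT_LOG_SCALE"
+//     },
+//     {
+//       "parameterName": "hidden_units",
+//       "type": "DISCRETE",
+//       "discreteValues": [32, 64, 128]
+//     }
+//   ]
+// }
+// </pre>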
+
+// Represents the result of a single hyperparameter tuning trial from a
+// training job. The TrainingOutput object that is returned on successful
+// completion of a training job with hyperparameter tuning includes a list
+// of HyperparameterOutput objects, one for each successful trial.
+message HyperparameterOutput {
+ // An observed value of a metric.
+ message HyperparameterMetric {
+ // The global training step for this metric.
+ int64 training_step = 1;
+
+ // The objective value at this training step.
+ double objective_value = 2;
+ }
+
+ // The trial id for these results.
+ string trial_id = 1;
+
+ // The hyperparameters given to this trial.
+ map<string, string> hyperparameters = 2;
+
+ // The final objective metric seen for this trial.
+ HyperparameterMetric final_metric = 3;
+
+  // All recorded objective metrics for this trial.
+ repeated HyperparameterMetric all_metrics = 4;
+}
+
+// Represents results of a training job. Output only.
+message TrainingOutput {
+ // The number of hyperparameter tuning trials that completed successfully.
+ // Only set for hyperparameter tuning jobs.
+ int64 completed_trial_count = 1;
+
+ // Results for individual Hyperparameter trials.
+ // Only set for hyperparameter tuning jobs.
+ repeated HyperparameterOutput trials = 2;
+
+ // The amount of ML units consumed by the job.
+ double consumed_ml_units = 3;
+
+ // Whether this job is a hyperparameter tuning job.
+ bool is_hyperparameter_tuning_job = 4;
+}
+
+// Represents input parameters for a prediction job.
+message PredictionInput {
+ // The format used to separate data instances in the source files.
+ enum DataFormat {
+ // Unspecified format.
+ DATA_FORMAT_UNSPECIFIED = 0;
+
+ // The source file is a text file with instances separated by the
+ // new-line character.
+ TEXT = 1;
+
+ // The source file is a TFRecord file.
+ TF_RECORD = 2;
+
+ // The source file is a GZIP-compressed TFRecord file.
+ TF_RECORD_GZIP = 3;
+ }
+
+ // Required. The model or the version to use for prediction.
+ oneof model_version {
+ // Use this field if you want to use the default version for the specified
+ // model. The string must use the following format:
+ //
+ // `"projects/<var>[YOUR_PROJECT]</var>/models/<var>[YOUR_MODEL]</var>"`
+ string model_name = 1;
+
+ // Use this field if you want to specify a version of the model to use. The
+    // string is formatted the same way as `model_name`, with the addition
+ // of the version information:
+ //
+ // `"projects/<var>[YOUR_PROJECT]</var>/models/<var>YOUR_MODEL/versions/<var>[YOUR_VERSION]</var>"`
+ string version_name = 2;
+
+ // Use this field if you want to specify a Google Cloud Storage path for
+ // the model to use.
+ string uri = 9;
+ }
+
+ // Required. The format of the input data files.
+ DataFormat data_format = 3;
+
+ // Required. The Google Cloud Storage location of the input data files.
+ // May contain wildcards.
+ repeated string input_paths = 4;
+
+ // Required. The output Google Cloud Storage location.
+ string output_path = 5;
+
+ // Optional. The maximum number of workers to be used for parallel processing.
+ // Defaults to 10 if not specified.
+ int64 max_worker_count = 6;
+
+ // Required. The Google Compute Engine region to run the prediction job in.
+ string region = 7;
+
+ // Optional. The Google Cloud ML runtime version to use for this batch
+ // prediction. If not set, Google Cloud ML will pick the runtime version used
+ // during the CreateVersion request for this model version, or choose the
+  // latest stable version when model version information is not available,
+  // such as when the model is specified by uri.
+ string runtime_version = 8;
+}
+
+// Represents results of a prediction job.
+message PredictionOutput {
+ // The output Google Cloud Storage location provided at the job creation time.
+ string output_path = 1;
+
+ // The number of generated predictions.
+ int64 prediction_count = 2;
+
+ // The number of data instances which resulted in errors.
+ int64 error_count = 3;
+
+ // Node hours used by the batch prediction job.
+ double node_hours = 4;
+}
+
+// Represents a training or prediction job.
+message Job {
+ // Describes the job state.
+ enum State {
+ // The job state is unspecified.
+ STATE_UNSPECIFIED = 0;
+
+    // The job has just been created and processing has not yet begun.
+ QUEUED = 1;
+
+ // The service is preparing to run the job.
+ PREPARING = 2;
+
+ // The job is in progress.
+ RUNNING = 3;
+
+ // The job completed successfully.
+ SUCCEEDED = 4;
+
+ // The job failed.
+ // `error_message` should contain the details of the failure.
+ FAILED = 5;
+
+ // The job is being cancelled.
+ // `error_message` should describe the reason for the cancellation.
+ CANCELLING = 6;
+
+ // The job has been cancelled.
+ // `error_message` should describe the reason for the cancellation.
+ CANCELLED = 7;
+ }
+
+ // Required. The user-specified id of the job.
+ string job_id = 1;
+
+ // Required. Parameters to create a job.
+ oneof input {
+ // Input parameters to create a training job.
+ TrainingInput training_input = 2;
+
+ // Input parameters to create a prediction job.
+ PredictionInput prediction_input = 3;
+ }
+
+ // Output only. When the job was created.
+ google.protobuf.Timestamp create_time = 4;
+
+ // Output only. When the job processing was started.
+ google.protobuf.Timestamp start_time = 5;
+
+ // Output only. When the job processing was completed.
+ google.protobuf.Timestamp end_time = 6;
+
+ // Output only. The detailed state of a job.
+ State state = 7;
+
+ // Output only. The details of a failure or a cancellation.
+ string error_message = 8;
+
+ // Output only. The current result of the job.
+ oneof output {
+ // The current training job result.
+ TrainingOutput training_output = 9;
+
+ // The current prediction job result.
+ PredictionOutput prediction_output = 10;
+ }
+}
+
+// Request message for the CreateJob method.
+message CreateJobRequest {
+ // Required. The project name.
+ //
+ // Authorization: requires `Editor` role on the specified project.
+ string parent = 1;
+
+ // Required. The job to create.
+ Job job = 2;
+}
+
+// Request message for the ListJobs method.
+message ListJobsRequest {
+ // Required. The name of the project for which to list jobs.
+ //
+ // Authorization: requires `Viewer` role on the specified project.
+ string parent = 1;
+
+ // Optional. Specifies the subset of jobs to retrieve.
+ string filter = 2;
+
+ // Optional. A page token to request the next page of results.
+ //
+ // You get the token from the `next_page_token` field of the response from
+ // the previous call.
+ string page_token = 4;
+
+ // Optional. The number of jobs to retrieve per "page" of results. If there
+ // are more remaining results than this number, the response message will
+ // contain a valid value in the `next_page_token` field.
+ //
+ // The default value is 20, and the maximum page size is 100.
+ int32 page_size = 5;
+}
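+
+// Paging sketch (assuming the standard HTTP mapping of request fields to query
+// parameters; the project name is a placeholder): the first call sets only
+// `pageSize`, and each subsequent call passes the `nextPageToken` from the
+// previous response as `pageToken` until the token comes back empty.
+// <pre>
+// GET /v1/projects/[YOUR_PROJECT]/jobs?pageSize=50
+// GET /v1/projects/[YOUR_PROJECT]/jobs?pageSize=50&pageToken=[NEXT_PAGE_TOKEN]
+// </pre>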
+
+// Response message for the ListJobs method.
+message ListJobsResponse {
+ // The list of jobs.
+ repeated Job jobs = 1;
+
+ // Optional. Pass this token as the `page_token` field of the request for a
+ // subsequent call.
+ string next_page_token = 2;
+}
+
+// Request message for the GetJob method.
+message GetJobRequest {
+ // Required. The name of the job to get the description of.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the CancelJob method.
+message CancelJobRequest {
+ // Required. The name of the job to cancel.
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1/model_service.proto b/third_party/googleapis/google/cloud/ml/v1/model_service.proto
new file mode 100644
index 0000000000..783ba87134
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1/model_service.proto
@@ -0,0 +1,371 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "ModelServiceProto";
+option java_package = "com.google.cloud.ml.api.v1";
+
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the 'models service' to work with the 'model' and 'version'
+// resources.
+
+
+
+// Provides methods that create and manage machine learning models and their
+// versions.
+//
+// A model in this context is a container for versions. The model can't provide
+// predictions without first having a version created for it.
+//
+// Each version is a trained machine learning model, and each is assumed to be
+// an iteration of the same machine learning problem as the other versions of
+// the same model.
+//
+// Your project can define multiple models, each with multiple versions.
+//
+// The basic life cycle of a model is:
+//
+// * Create and train the machine learning model and save it to a
+// Google Cloud Storage location.
+// * Use
+// [projects.models.create](/ml/reference/rest/v1/projects.models/create)
+// to make a new model in your project.
+// * Use
+// [projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create)
+// to deploy your saved model.
+// * Use [projects.predict](/ml/reference/rest/v1/projects/predict) to
+// request predictions of a version of your model, or use
+// [projects.jobs.create](/ml/reference/rest/v1/projects.jobs/create)
+// to start a batch prediction job.
+service ModelService {
+ // Creates a model which will later contain one or more versions.
+ //
+ // You must add at least one version before you can request predictions from
+ // the model. Add versions by calling
+ // [projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create).
+ rpc CreateModel(CreateModelRequest) returns (Model) {
+ option (google.api.http) = { post: "/v1/{parent=projects/*}/models" body: "model" };
+ }
+
+ // Lists the models in a project.
+ //
+ // Each project can contain multiple models, and each model can have multiple
+ // versions.
+ rpc ListModels(ListModelsRequest) returns (ListModelsResponse) {
+ option (google.api.http) = { get: "/v1/{parent=projects/*}/models" };
+ }
+
+ // Gets information about a model, including its name, the description (if
+ // set), and the default version (if at least one version of the model has
+ // been deployed).
+ rpc GetModel(GetModelRequest) returns (Model) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/models/*}" };
+ }
+
+ // Deletes a model.
+ //
+ // You can only delete a model if there are no versions in it. You can delete
+ // versions by calling
+ // [projects.models.versions.delete](/ml/reference/rest/v1/projects.models.versions/delete).
+ rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/models/*}" };
+ }
+
+ // Creates a new version of a model from a trained TensorFlow model.
+ //
+ // If the version created in the cloud by this call is the first deployed
+ // version of the specified model, it will be made the default version of the
+ // model. When you add a version to a model that already has one or more
+ // versions, the default version does not automatically change. If you want a
+ // new version to be the default, you must call
+ // [projects.models.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault).
+ rpc CreateVersion(CreateVersionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/{parent=projects/*/models/*}/versions" body: "version" };
+ }
+
+ // Gets basic information about all the versions of a model.
+ //
+ // If you expect that a model has a lot of versions, or if you need to handle
+ // only a limited number of results at a time, you can request that the list
+  // be retrieved in batches (called pages).
+ rpc ListVersions(ListVersionsRequest) returns (ListVersionsResponse) {
+ option (google.api.http) = { get: "/v1/{parent=projects/*/models/*}/versions" };
+ }
+
+ // Gets information about a model version.
+ //
+ // Models can have multiple versions. You can call
+ // [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list)
+ // to get the same information that this method returns for all of the
+ // versions of a model.
+ rpc GetVersion(GetVersionRequest) returns (Version) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/models/*/versions/*}" };
+ }
+
+ // Deletes a model version.
+ //
+ // Each model can have multiple versions deployed and in use at any given
+ // time. Use this method to remove a single version.
+ //
+ // Note: You cannot delete the version that is set as the default version
+ // of the model unless it is the only remaining version.
+ rpc DeleteVersion(DeleteVersionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/models/*/versions/*}" };
+ }
+
+ // Designates a version to be the default for the model.
+ //
+ // The default version is used for prediction requests made against the model
+ // that don't specify a version.
+ //
+ // The first version to be created for a model is automatically set as the
+ // default. You must make any subsequent changes to the default version
+ // setting manually using this method.
+ rpc SetDefaultVersion(SetDefaultVersionRequest) returns (Version) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/models/*/versions/*}:setDefault" body: "*" };
+ }
+}
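+
+// For reference, the HTTP bindings above use resource names of the following
+// shapes (bracketed segments are placeholders):
+// <pre>
+// projects/[YOUR_PROJECT]
+// projects/[YOUR_PROJECT]/models/[YOUR_MODEL]
+// projects/[YOUR_PROJECT]/models/[YOUR_MODEL]/versions/[YOUR_VERSION]
+// </pre>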
+
+// Represents a machine learning solution.
+//
+// A model can have multiple versions, each of which is a deployed, trained
+// model ready to receive prediction requests. The model itself is just a
+// container.
+message Model {
+ // Required. The name specified for the model when it was created.
+ //
+ // The model name must be unique within the project it is created in.
+ string name = 1;
+
+ // Optional. The description specified for the model when it was created.
+ string description = 2;
+
+ // Output only. The default version of the model. This version will be used to
+ // handle prediction requests that do not specify a version.
+ //
+ // You can change the default version by calling
+ // [projects.methods.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault).
+ Version default_version = 3;
+
+ // Optional. The list of regions where the model is going to be deployed.
+ // Currently only one region per model is supported.
+ // Defaults to 'us-central1' if nothing is set.
+ repeated string regions = 4;
+
+  // Optional. If true, enables Stackdriver Logging for online prediction.
+ // Default is false.
+ bool online_prediction_logging = 5;
+}
+
+// Represents a version of the model.
+//
+// Each version is a trained model deployed in the cloud, ready to handle
+// prediction requests. A model can have multiple versions. You can get
+// information about all of the versions of a given model by calling
+// [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list).
+message Version {
+  // Required. The name specified for the version when it was created.
+ //
+ // The version name must be unique within the model it is created in.
+ string name = 1;
+
+ // Optional. The description specified for the version when it was created.
+ string description = 2;
+
+ // Output only. If true, this version will be used to handle prediction
+ // requests that do not specify a version.
+ //
+ // You can change the default version by calling
+ // [projects.methods.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault).
+ bool is_default = 3;
+
+ // Required. The Google Cloud Storage location of the trained model used to
+ // create the version. See the
+ // [overview of model deployment](/ml/docs/concepts/deployment-overview) for
+  // more information.
+ //
+ // When passing Version to
+ // [projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create)
+ // the model service uses the specified location as the source of the model.
+ // Once deployed, the model version is hosted by the prediction service, so
+ // this location is useful only as a historical record.
+ string deployment_uri = 4;
+
+ // Output only. The time the version was created.
+ google.protobuf.Timestamp create_time = 5;
+
+ // Output only. The time the version was last used for prediction.
+ google.protobuf.Timestamp last_use_time = 6;
+
+ // Optional. The Google Cloud ML runtime version to use for this deployment.
+ // If not set, Google Cloud ML will choose a version.
+ string runtime_version = 8;
+
+ // Optional. Manually select the number of nodes to use for serving the
+ // model. If unset (i.e., by default), the number of nodes used to serve
+ // the model automatically scales with traffic. However, care should be
+ // taken to ramp up traffic according to the model's ability to scale. If
+  // your model needs to handle bursts of traffic beyond its ability to
+ // scale, it is recommended you set this field appropriately.
+ ManualScaling manual_scaling = 9;
+}
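+
+// An illustrative (not authoritative) JSON body for
+// [projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create);
+// the version name and Cloud Storage path below are placeholders:
+// <pre>
+// {
+//   "name": "v1",
+//   "description": "First deployed version",
+//   "deploymentUri": "gs://[YOUR_BUCKET]/[MODEL_EXPORT_DIR]"
+// }
+// </pre>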
+
+// Options for manually scaling a model.
+message ManualScaling {
+ // The number of nodes to allocate for this model. These nodes are always up,
+ // starting from the time the model is deployed, so the cost of operating
+ // this model will be proportional to nodes * number of hours since
+ // deployment.
+ int32 nodes = 1;
+}
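+
+// For example, under this definition a version deployed with
+// <code suppresswarning="true">nodes = 2</code> that stays up for 36 hours
+// accounts for roughly 2 * 36 = 72 node hours.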
+
+// Request message for the CreateModel method.
+message CreateModelRequest {
+ // Required. The project name.
+ //
+ // Authorization: requires `Editor` role on the specified project.
+ string parent = 1;
+
+ // Required. The model to create.
+ Model model = 2;
+}
+
+// Request message for the ListModels method.
+message ListModelsRequest {
+ // Required. The name of the project whose models are to be listed.
+ //
+ // Authorization: requires `Viewer` role on the specified project.
+ string parent = 1;
+
+ // Optional. A page token to request the next page of results.
+ //
+ // You get the token from the `next_page_token` field of the response from
+ // the previous call.
+ string page_token = 4;
+
+ // Optional. The number of models to retrieve per "page" of results. If there
+ // are more remaining results than this number, the response message will
+ // contain a valid value in the `next_page_token` field.
+ //
+ // The default value is 20, and the maximum page size is 100.
+ int32 page_size = 5;
+}
+
+// Response message for the ListModels method.
+message ListModelsResponse {
+ // The list of models.
+ repeated Model models = 1;
+
+ // Optional. Pass this token as the `page_token` field of the request for a
+ // subsequent call.
+ string next_page_token = 2;
+}
+
+// Request message for the GetModel method.
+message GetModelRequest {
+ // Required. The name of the model.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the DeleteModel method.
+message DeleteModelRequest {
+ // Required. The name of the model.
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
+
+// Uploads the provided trained model version to Cloud Machine Learning.
+message CreateVersionRequest {
+ // Required. The name of the model.
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string parent = 1;
+
+ // Required. The version details.
+ Version version = 2;
+}
+
+// Request message for the ListVersions method.
+message ListVersionsRequest {
+  // Required. The name of the model for which to list the versions.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string parent = 1;
+
+ // Optional. A page token to request the next page of results.
+ //
+ // You get the token from the `next_page_token` field of the response from
+ // the previous call.
+ string page_token = 4;
+
+ // Optional. The number of versions to retrieve per "page" of results. If
+ // there are more remaining results than this number, the response message
+ // will contain a valid value in the `next_page_token` field.
+ //
+ // The default value is 20, and the maximum page size is 100.
+ int32 page_size = 5;
+}
+
+// Response message for the ListVersions method.
+message ListVersionsResponse {
+ // The list of versions.
+ repeated Version versions = 1;
+
+ // Optional. Pass this token as the `page_token` field of the request for a
+ // subsequent call.
+ string next_page_token = 2;
+}
+
+// Request message for the GetVersion method.
+message GetVersionRequest {
+ // Required. The name of the version.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the DeleteVersion method.
+message DeleteVersionRequest {
+ // Required. The name of the version. You can get the names of all the
+ // versions of a model by calling
+ // [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list).
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the SetDefaultVersion method.
+message SetDefaultVersionRequest {
+ // Required. The name of the version to make the default for the model. You
+ // can get the names of all the versions of a model by calling
+ // [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list).
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1/operation_metadata.proto b/third_party/googleapis/google/cloud/ml/v1/operation_metadata.proto
new file mode 100644
index 0000000000..c29a789259
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1/operation_metadata.proto
@@ -0,0 +1,72 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/ml/v1/model_service.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "OperationMetadataProto";
+option java_package = "com.google.cloud.ml.api.v1";
+
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the metadata for longrunning operations.
+
+
+
+// Represents the metadata of the long-running operation.
+message OperationMetadata {
+ // The operation type.
+ enum OperationType {
+ // Unspecified operation type.
+ OPERATION_TYPE_UNSPECIFIED = 0;
+
+ // An operation to create a new version.
+ CREATE_VERSION = 1;
+
+ // An operation to delete an existing version.
+ DELETE_VERSION = 2;
+
+ // An operation to delete an existing model.
+ DELETE_MODEL = 3;
+ }
+
+ // The time the operation was submitted.
+ google.protobuf.Timestamp create_time = 1;
+
+ // The time operation processing started.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time operation processing completed.
+ google.protobuf.Timestamp end_time = 3;
+
+ // Indicates whether a request to cancel this operation has been made.
+ bool is_cancellation_requested = 4;
+
+ // The operation type.
+ OperationType operation_type = 5;
+
+ // Contains the name of the model associated with the operation.
+ string model_name = 6;
+
+ // Contains the version associated with the operation.
+ Version version = 7;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1/prediction_service.proto b/third_party/googleapis/google/cloud/ml/v1/prediction_service.proto
new file mode 100644
index 0000000000..c5e25dcd3b
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1/prediction_service.proto
@@ -0,0 +1,240 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1;
+
+import "google/api/annotations.proto";
+import "google/api/httpbody.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "PredictionServiceProto";
+option java_package = "com.google.cloud.ml.api.v1";
+
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the online prediction service.
+
+
+
+// The Prediction API, which serves predictions for models managed by
+// ModelService.
+service OnlinePredictionService {
+ // Performs prediction on the data in the request.
+ //
+ // **** REMOVE FROM GENERATED DOCUMENTATION
+ rpc Predict(PredictRequest) returns (google.api.HttpBody) {
+ option (google.api.http) = { post: "/v1/{name=projects/**}:predict" body: "*" };
+ }
+}
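+
+// A sketch of the call over REST, assuming the public API endpoint (the host
+// name and resource names below are illustrative assumptions, not part of
+// this definition):
+// <pre>
+// POST https://ml.googleapis.com/v1/projects/[YOUR_PROJECT]/models/[YOUR_MODEL]:predict
+// Content-Type: application/json
+//
+// {"instances": [ ... ]}
+// </pre>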
+
+// Request for predictions to be issued against a trained model.
+//
+// The body of the request is a single JSON object with a single top-level
+// field:
+//
+// <dl>
+// <dt>instances</dt>
+// <dd>A JSON array containing values representing the instances to use for
+// prediction.</dd>
+// </dl>
+//
+// The structure of each element of the instances list is determined by your
+// model's input definition. Instances can include named inputs or can contain
+// only unlabeled values.
+//
+// Not all data includes named inputs. Some instances will be simple
+// JSON values (boolean, number, or string). However, instances are often lists
+// of simple values, or complex nested lists. Here are some examples of request
+// bodies:
+//
+// CSV data with each row encoded as a string value:
+// <pre>
+// {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]}
+// </pre>
+// Plain text:
+// <pre>
+// {"instances": ["the quick brown fox", "la bruja le dio"]}
+// </pre>
+// Sentences encoded as lists of words (vectors of strings):
+// <pre>
+// {
+// "instances": [
+// ["the","quick","brown"],
+// ["la","bruja","le"],
+// ...
+// ]
+// }
+// </pre>
+// Floating point scalar values:
+// <pre>
+// {"instances": [0.0, 1.1, 2.2]}
+// </pre>
+// Vectors of integers:
+// <pre>
+// {
+// "instances": [
+// [0, 1, 2],
+// [3, 4, 5],
+// ...
+// ]
+// }
+// </pre>
+// Tensors (in this case, two-dimensional tensors):
+// <pre>
+// {
+// "instances": [
+// [
+// [0, 1, 2],
+// [3, 4, 5]
+// ],
+// ...
+// ]
+// }
+// </pre>
+// Images can be represented in different ways. In this encoding scheme the first
+// two dimensions represent the rows and columns of the image, and the third
+// contains lists (vectors) of the R, G, and B values for each pixel.
+// <pre>
+// {
+// "instances": [
+// [
+// [
+// [138, 30, 66],
+// [130, 20, 56],
+// ...
+// ],
+// [
+// [126, 38, 61],
+// [122, 24, 57],
+// ...
+// ],
+// ...
+// ],
+// ...
+// ]
+// }
+// </pre>
+// JSON strings must be encoded as UTF-8. To send binary data, you must
+// base64-encode the data and mark it as binary. To mark a JSON string
+// as binary, replace it with a JSON object with a single attribute named `b64`:
+// <pre>{"b64": "..."} </pre>
+// For example:
+//
+// Two Serialized tf.Examples (fake data, for illustrative purposes only):
+// <pre>
+// {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]}
+// </pre>
+// Two JPEG image byte strings (fake data, for illustrative purposes only):
+// <pre>
+// {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]}
+// </pre>
+// If your data includes named references, format each instance as a JSON object
+// with the named references as the keys:
+//
+// JSON input data to be preprocessed:
+// <pre>
+// {
+// "instances": [
+// {
+// "a": 1.0,
+// "b": true,
+// "c": "x"
+// },
+// {
+// "a": -2.0,
+// "b": false,
+// "c": "y"
+// }
+// ]
+// }
+// </pre>
+// Some models have an underlying TensorFlow graph that accepts multiple input
+// tensors. In this case, you should use the names of JSON name/value pairs to
+// identify the input tensors, as shown in the following examples:
+//
+// For a graph with input tensor aliases "tag" (string) and "image"
+// (base64-encoded string):
+// <pre>
+// {
+// "instances": [
+// {
+// "tag": "beach",
+// "image": {"b64": "ASa8asdf"}
+// },
+// {
+// "tag": "car",
+// "image": {"b64": "JLK7ljk3"}
+// }
+// ]
+// }
+// </pre>
+// For a graph with input tensor aliases "tag" (string) and "image"
+// (3-dimensional array of 8-bit ints):
+// <pre>
+// {
+// "instances": [
+// {
+// "tag": "beach",
+// "image": [
+// [
+// [138, 30, 66],
+// [130, 20, 56],
+// ...
+// ],
+// [
+// [126, 38, 61],
+// [122, 24, 57],
+// ...
+// ],
+// ...
+// ]
+// },
+// {
+// "tag": "car",
+// "image": [
+// [
+// [255, 0, 102],
+// [255, 0, 97],
+// ...
+// ],
+// [
+// [254, 1, 101],
+// [254, 2, 93],
+// ...
+// ],
+// ...
+// ]
+// },
+// ...
+// ]
+// }
+// </pre>
+// If the call is successful, the response body will contain one prediction
+// entry per instance in the request body. If prediction fails for any
+// instance, the response body will contain no predictions and will contain
+// a single error entry instead.
+message PredictRequest {
+ // Required. The resource name of a model or a version.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+
+ //
+ // Required. The prediction request body.
+ google.api.HttpBody http_body = 2;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1/project_service.proto b/third_party/googleapis/google/cloud/ml/v1/project_service.proto
new file mode 100644
index 0000000000..f54eadf6a3
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1/project_service.proto
@@ -0,0 +1,59 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "ProjectServiceProto";
+option java_package = "com.google.cloud.ml.api.v1";
+
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the project management service.
+
+
+
+// Allows retrieving project related information.
+service ProjectManagementService {
+  // Gets the service account information associated with your project. You need
+  // this information in order to grant the service account permissions for
+ // the Google Cloud Storage location where you put your model training code
+ // for training the model with Google Cloud Machine Learning.
+ rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) {
+ option (google.api.http) = { get: "/v1/{name=projects/*}:getConfig" };
+ }
+}
+
+// Requests service account information associated with a project.
+message GetConfigRequest {
+ // Required. The project name.
+ //
+ // Authorization: requires `Viewer` role on the specified project.
+ string name = 1;
+}
+
+// Returns service account information associated with a project.
+message GetConfigResponse {
+ // The service account Cloud ML uses to access resources in the project.
+ string service_account = 1;
+
+ // The project number for `service_account`.
+ int64 service_account_project = 2;
+}
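+
+// An illustrative response body (both values are placeholders):
+// <pre>
+// {
+//   "serviceAccount": "[SERVICE_ACCOUNT_EMAIL]",
+//   "serviceAccountProject": "123456789"
+// }
+// </pre>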
diff --git a/third_party/googleapis/google/cloud/ml/v1beta1/job_service.proto b/third_party/googleapis/google/cloud/ml/v1beta1/job_service.proto
new file mode 100644
index 0000000000..1ac71e175c
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1beta1/job_service.proto
@@ -0,0 +1,605 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1beta1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "JobServiceProto";
+option java_package = "com.google.cloud.ml.api.v1beta1";
+
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the 'job service' to manage training and prediction jobs.
+
+
+
+// Service to create and manage training and batch prediction jobs.
+service JobService {
+ // Creates a training or a batch prediction job.
+ rpc CreateJob(CreateJobRequest) returns (Job) {
+ option (google.api.http) = { post: "/v1beta1/{parent=projects/*}/jobs" body: "job" };
+ }
+
+ // Lists the jobs in the project.
+ rpc ListJobs(ListJobsRequest) returns (ListJobsResponse) {
+ option (google.api.http) = { get: "/v1beta1/{parent=projects/*}/jobs" };
+ }
+
+ // Describes a job.
+ rpc GetJob(GetJobRequest) returns (Job) {
+ option (google.api.http) = { get: "/v1beta1/{name=projects/*/jobs/*}" };
+ }
+
+ // Cancels a running job.
+ rpc CancelJob(CancelJobRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1beta1/{name=projects/*/jobs/*}:cancel" body: "*" };
+ }
+}
+
+// Represents input parameters for a training job.
+message TrainingInput {
+ // A scale tier is an abstract representation of the resources Cloud ML
+ // will allocate to a training job. When selecting a scale tier for your
+ // training job, you should consider the size of your training dataset and
+ // the complexity of your model. As the tiers increase, virtual machines are
+ // added to handle your job, and the individual machines in the cluster
+ // generally have more memory and greater processing power than they do at
+ // lower tiers. The number of training units charged per hour of processing
+ // increases as tiers get more advanced. Refer to the
+ // [pricing guide](/ml/pricing) for more details. Note that in addition to
+ // incurring costs, your use of training resources is constrained by the
+ // [quota policy](/ml/quota).
+ enum ScaleTier {
+ // A single worker instance. This tier is suitable for learning how to use
+ // Cloud ML, and for experimenting with new models using small datasets.
+ BASIC = 0;
+
+ // Many workers and a few parameter servers.
+ STANDARD_1 = 1;
+
+ // A large number of workers with many parameter servers.
+ PREMIUM_1 = 3;
+
+ // A single worker instance [with a GPU](ml/docs/how-tos/using-gpus).
+ BASIC_GPU = 6;
+
+ // The CUSTOM tier is not a set tier, but rather enables you to use your
+ // own cluster specification. When you use this tier, set values to
+ // configure your processing cluster according to these guidelines:
+ //
+ // * You _must_ set `TrainingInput.masterType` to specify the type
+ // of machine to use for your master node. This is the only required
+ // setting.
+ //
+ // * You _may_ set `TrainingInput.workerCount` to specify the number of
+ // workers to use. If you specify one or more workers, you _must_ also
+ // set `TrainingInput.workerType` to specify the type of machine to use
+ // for your worker nodes.
+ //
+ // * You _may_ set `TrainingInput.parameterServerCount` to specify the
+ // number of parameter servers to use. If you specify one or more
+ // parameter servers, you _must_ also set
+ // `TrainingInput.parameterServerType` to specify the type of machine to
+ // use for your parameter servers.
+ //
+ // Note that all of your workers must use the same machine type, which can
+ // be different from your parameter server type and master type. Your
+ // parameter servers must likewise use the same machine type, which can be
+ // different from your worker type and master type.
+ CUSTOM = 5;
+ }
+
+ // Required. Specifies the machine types, the number of replicas for workers
+ // and parameter servers.
+ ScaleTier scale_tier = 1;
+
+ // Optional. Specifies the type of virtual machine to use for your training
+ // job's master worker.
+ //
+ // The following types are supported:
+ //
+ // <dl>
+ // <dt>standard</dt>
+ // <dd>
+ // A basic machine configuration suitable for training simple models with
+ // small to moderate datasets.
+ // </dd>
+ // <dt>large_model</dt>
+ // <dd>
+ // A machine with a lot of memory, specially suited for parameter servers
+ // when your model is large (having many hidden layers or layers with very
+ // large numbers of nodes).
+ // </dd>
+ // <dt>complex_model_s</dt>
+ // <dd>
+ // A machine suitable for the master and workers of the cluster when your
+ // model requires more computation than the standard machine can handle
+ // satisfactorily.
+ // </dd>
+ // <dt>complex_model_m</dt>
+ // <dd>
+ // A machine with roughly twice the number of cores and roughly double the
+ // memory of <code suppresswarning="true">complex_model_s</code>.
+ // </dd>
+ // <dt>complex_model_l</dt>
+ // <dd>
+ // A machine with roughly twice the number of cores and roughly double the
+ // memory of <code suppresswarning="true">complex_model_m</code>.
+ // </dd>
+ // <dt>standard_gpu</dt>
+ // <dd>
+ // A machine equivalent to <code suppresswarning="true">standard</code> that
+ // also includes a
+ // <a href="ml/docs/how-tos/using-gpus">
+ // GPU that you can use in your trainer</a>.
+ // </dd>
+ // <dt>complex_model_m_gpu</dt>
+ // <dd>
+ // A machine equivalent to
+  // <code suppresswarning="true">complex_model_m</code> that also includes
+ // four GPUs.
+ // </dd>
+ // </dl>
+ //
+ // You must set this value when `scaleTier` is set to `CUSTOM`.
+ string master_type = 2;
+
+ // Optional. Specifies the type of virtual machine to use for your training
+ // job's worker nodes.
+ //
+ // The supported values are the same as those described in the entry for
+ // `masterType`.
+ //
+ // This value must be present when `scaleTier` is set to `CUSTOM` and
+ // `workerCount` is greater than zero.
+ string worker_type = 3;
+
+ // Optional. Specifies the type of virtual machine to use for your training
+ // job's parameter server.
+ //
+ // The supported values are the same as those described in the entry for
+ // `master_type`.
+ //
+ // This value must be present when `scaleTier` is set to `CUSTOM` and
+ // `parameter_server_count` is greater than zero.
+ string parameter_server_type = 4;
+
+ // Optional. The number of worker replicas to use for the training job. Each
+ // replica in the cluster will be of the type specified in `worker_type`.
+ //
+ // This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ // set this value, you must also set `worker_type`.
+ int64 worker_count = 5;
+
+ // Optional. The number of parameter server replicas to use for the training
+ // job. Each replica in the cluster will be of the type specified in
+ // `parameter_server_type`.
+ //
+  // This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ // set this value, you must also set `parameter_server_type`.
+ int64 parameter_server_count = 6;
+
+ // Required. The Google Cloud Storage location of the packages with
+ // the training program and any additional dependencies.
+ repeated string package_uris = 7;
+
+ // Required. The Python module name to run after installing the packages.
+ string python_module = 8;
+
+ // Optional. Command line arguments to pass to the program.
+ repeated string args = 10;
+
+ // Optional. The set of Hyperparameters to tune.
+ HyperparameterSpec hyperparameters = 12;
+
+ // Required. The Google Compute Engine region to run the training job in.
+ string region = 14;
+
+ // Optional. A Google Cloud Storage path in which to store training outputs
+ // and other data needed for training. This path is passed to your TensorFlow
+ // program as the 'job_dir' command-line argument. The benefit of specifying
+ // this field is that Cloud ML validates the path for use in training.
+ string job_dir = 16;
+
+ // Optional. The Google Cloud ML runtime version to use for training. If not
+ // set, Google Cloud ML will choose the latest stable version.
+ string runtime_version = 15;
+}
+
+// Represents a set of hyperparameters to optimize.
+message HyperparameterSpec {
+ // The available types of optimization goals.
+ enum GoalType {
+ // Goal Type will default to maximize.
+ GOAL_TYPE_UNSPECIFIED = 0;
+
+ // Maximize the goal metric.
+ MAXIMIZE = 1;
+
+ // Minimize the goal metric.
+ MINIMIZE = 2;
+ }
+
+ // Required. The type of goal to use for tuning. Available types are
+ // `MAXIMIZE` and `MINIMIZE`.
+ //
+ // Defaults to `MAXIMIZE`.
+ GoalType goal = 1;
+
+ // Required. The set of parameters to tune.
+ repeated ParameterSpec params = 2;
+
+ // Optional. How many training trials should be attempted to optimize
+ // the specified hyperparameters.
+ //
+ // Defaults to one.
+ int32 max_trials = 3;
+
+ // Optional. The number of training trials to run concurrently.
+ // You can reduce the time it takes to perform hyperparameter tuning by adding
+  // trials in parallel. However, each trial only benefits from the information
+ // gained in completed trials. That means that a trial does not get access to
+ // the results of trials running at the same time, which could reduce the
+ // quality of the overall optimization.
+ //
+ // Each trial will use the same scale tier and machine types.
+ //
+ // Defaults to one.
+ int32 max_parallel_trials = 4;
+
+  // Optional. The TensorFlow summary tag name to use for optimizing trials. For
+  // current versions of TensorFlow, this tag name should exactly match what is
+  // shown in TensorBoard, including all scopes. For versions of TensorFlow
+  // prior to 0.12, this should be only the tag passed to tf.Summary.
+ // By default, "training/hptuning/metric" will be used.
+ string hyperparameter_metric_tag = 5;
+}
+
+// Represents a single hyperparameter to optimize.
+message ParameterSpec {
+ // The type of the parameter.
+ enum ParameterType {
+ // You must specify a valid type. Using this unspecified type will result in
+ // an error.
+ PARAMETER_TYPE_UNSPECIFIED = 0;
+
+ // Type for real-valued parameters.
+ DOUBLE = 1;
+
+ // Type for integral parameters.
+ INTEGER = 2;
+
+ // The parameter is categorical, with a value chosen from the categories
+ // field.
+ CATEGORICAL = 3;
+
+ // The parameter is real valued, with a fixed set of feasible points. If
+ // `type==DISCRETE`, feasible_points must be provided, and
+ // {`min_value`, `max_value`} will be ignored.
+ DISCRETE = 4;
+ }
+
+ // The type of scaling that should be applied to this parameter.
+ enum ScaleType {
+ // By default, no scaling is applied.
+ NONE = 0;
+
+ // Scales the feasible space to (0, 1) linearly.
+ UNIT_LINEAR_SCALE = 1;
+
+ // Scales the feasible space logarithmically to (0, 1). The entire feasible
+ // space must be strictly positive.
+ UNIT_LOG_SCALE = 2;
+
+ // Scales the feasible space "reverse" logarithmically to (0, 1). The result
+ // is that values close to the top of the feasible space are spread out more
+ // than points near the bottom. The entire feasible space must be strictly
+ // positive.
+ UNIT_REVERSE_LOG_SCALE = 3;
+ }
+
+ // Required. The parameter name must be unique amongst all ParameterConfigs in
+ // a HyperparameterSpec message. E.g., "learning_rate".
+ string parameter_name = 1;
+
+ // Required. The type of the parameter.
+ ParameterType type = 4;
+
+  // Required if type is `DOUBLE` or `INTEGER`. This field
+  // should be unset if type is `CATEGORICAL`. This value should be an integer
+  // if type is `INTEGER`.
+ double min_value = 2;
+
+  // Required if type is `DOUBLE` or `INTEGER`. This field
+  // should be unset if type is `CATEGORICAL`. This value should be an integer
+  // if type is `INTEGER`.
+ double max_value = 3;
+
+ // Required if type is `CATEGORICAL`. The list of possible categories.
+ repeated string categorical_values = 5;
+
+ // Required if type is `DISCRETE`.
+ // A list of feasible points.
+ // The list should be in strictly increasing order. For instance, this
+ // parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+ // should not contain more than 1,000 values.
+ repeated double discrete_values = 6;
+
+ // Optional. How the parameter should be scaled to the hypercube.
+ // Leave unset for categorical parameters.
+ // Some kind of scaling is strongly recommended for real or integral
+ // parameters (e.g., `UNIT_LINEAR_SCALE`).
+ ScaleType scale_type = 7;
+}
+
+// Represents the result of a single hyperparameter tuning trial from a
+// training job. The TrainingOutput object that is returned on successful
+// completion of a training job with hyperparameter tuning includes a list
+// of HyperparameterOutput objects, one for each successful trial.
+message HyperparameterOutput {
+ // An observed value of a metric.
+ message HyperparameterMetric {
+ // The global training step for this metric.
+ int64 training_step = 1;
+
+ // The objective value at this training step.
+ double objective_value = 2;
+ }
+
+ // The trial id for these results.
+ string trial_id = 1;
+
+ // The hyperparameters given to this trial.
+ map<string, string> hyperparameters = 2;
+
+ // The final objective metric seen for this trial.
+ HyperparameterMetric final_metric = 3;
+
+  // All recorded objective metrics for this trial.
+ repeated HyperparameterMetric all_metrics = 4;
+}
+
+// Represents results of a training job. Output only.
+message TrainingOutput {
+ // The number of hyperparameter tuning trials that completed successfully.
+ // Only set for hyperparameter tuning jobs.
+ int64 completed_trial_count = 1;
+
+ // Results for individual Hyperparameter trials.
+ // Only set for hyperparameter tuning jobs.
+ repeated HyperparameterOutput trials = 2;
+
+ // The amount of ML units consumed by the job.
+ double consumed_ml_units = 3;
+
+ // Whether this job is a hyperparameter tuning job.
+ bool is_hyperparameter_tuning_job = 4;
+}
+
+// Represents input parameters for a prediction job.
+message PredictionInput {
+ // The format used to separate data instances in the source files.
+ enum DataFormat {
+ // Unspecified format.
+ DATA_FORMAT_UNSPECIFIED = 0;
+
+ // The source file is a text file with instances separated by the
+ // new-line character.
+ TEXT = 1;
+
+ // The source file is a TFRecord file.
+ TF_RECORD = 2;
+
+ // The source file is a GZIP-compressed TFRecord file.
+ TF_RECORD_GZIP = 3;
+ }
+
+ // Required. The model or the version to use for prediction.
+ oneof model_version {
+ // Use this field if you want to use the default version for the specified
+ // model. The string must use the following format:
+ //
+ // `"projects/<var>[YOUR_PROJECT]</var>/models/<var>[YOUR_MODEL]</var>"`
+ string model_name = 1;
+
+ // Use this field if you want to specify a version of the model to use. The
+    // string is formatted the same way as `model_name`, with the addition
+ // of the version information:
+ //
+ // `"projects/<var>[YOUR_PROJECT]</var>/models/<var>YOUR_MODEL/versions/<var>[YOUR_VERSION]</var>"`
+ string version_name = 2;
+
+ // Use this field if you want to specify a Google Cloud Storage path for
+ // the model to use.
+ string uri = 9;
+ }
+
+ // Required. The format of the input data files.
+ DataFormat data_format = 3;
+
+ // Required. The Google Cloud Storage location of the input data files.
+ // May contain wildcards.
+ repeated string input_paths = 4;
+
+ // Required. The output Google Cloud Storage location.
+ string output_path = 5;
+
+ // Optional. The maximum number of workers to be used for parallel processing.
+ // Defaults to 10 if not specified.
+ int64 max_worker_count = 6;
+
+ // Required. The Google Compute Engine region to run the prediction job in.
+ string region = 7;
+
+ // Optional. The Google Cloud ML runtime version to use for this batch
+ // prediction. If not set, Google Cloud ML will pick the runtime version used
+ // during the CreateVersion request for this model version, or choose the
+  // latest stable version when model version information is not available,
+  // such as when the model is specified by uri.
+ string runtime_version = 8;
+}
+
+// Represents results of a prediction job.
+message PredictionOutput {
+ // The output Google Cloud Storage location provided at the job creation time.
+ string output_path = 1;
+
+ // The number of generated predictions.
+ int64 prediction_count = 2;
+
+ // The number of data instances which resulted in errors.
+ int64 error_count = 3;
+
+ // Node hours used by the batch prediction job.
+ double node_hours = 4;
+}
+
+// Represents a training or prediction job.
+message Job {
+ // Describes the job state.
+ enum State {
+ // The job state is unspecified.
+ STATE_UNSPECIFIED = 0;
+
+    // The job has just been created and processing has not yet begun.
+ QUEUED = 1;
+
+ // The service is preparing to run the job.
+ PREPARING = 2;
+
+ // The job is in progress.
+ RUNNING = 3;
+
+ // The job completed successfully.
+ SUCCEEDED = 4;
+
+ // The job failed.
+ // `error_message` should contain the details of the failure.
+ FAILED = 5;
+
+ // The job is being cancelled.
+ // `error_message` should describe the reason for the cancellation.
+ CANCELLING = 6;
+
+ // The job has been cancelled.
+ // `error_message` should describe the reason for the cancellation.
+ CANCELLED = 7;
+ }
+
+ // Required. The user-specified id of the job.
+ string job_id = 1;
+
+ // Required. Parameters to create a job.
+ oneof input {
+ // Input parameters to create a training job.
+ TrainingInput training_input = 2;
+
+ // Input parameters to create a prediction job.
+ PredictionInput prediction_input = 3;
+ }
+
+ // Output only. When the job was created.
+ google.protobuf.Timestamp create_time = 4;
+
+ // Output only. When the job processing was started.
+ google.protobuf.Timestamp start_time = 5;
+
+ // Output only. When the job processing was completed.
+ google.protobuf.Timestamp end_time = 6;
+
+ // Output only. The detailed state of a job.
+ State state = 7;
+
+ // Output only. The details of a failure or a cancellation.
+ string error_message = 8;
+
+ // Output only. The current result of the job.
+ oneof output {
+ // The current training job result.
+ TrainingOutput training_output = 9;
+
+ // The current prediction job result.
+ PredictionOutput prediction_output = 10;
+ }
+}
+
+// Request message for the CreateJob method.
+message CreateJobRequest {
+ // Required. The project name.
+ //
+ // Authorization: requires `Editor` role on the specified project.
+ string parent = 1;
+
+ // Required. The job to create.
+ Job job = 2;
+}
+
+// Request message for the ListJobs method.
+message ListJobsRequest {
+ // Required. The name of the project for which to list jobs.
+ //
+ // Authorization: requires `Viewer` role on the specified project.
+ string parent = 1;
+
+ // Optional. Specifies the subset of jobs to retrieve.
+ string filter = 2;
+
+ // Optional. A page token to request the next page of results.
+ //
+ // You get the token from the `next_page_token` field of the response from
+ // the previous call.
+ string page_token = 4;
+
+ // Optional. The number of jobs to retrieve per "page" of results. If there
+ // are more remaining results than this number, the response message will
+ // contain a valid value in the `next_page_token` field.
+ //
+ // The default value is 20, and the maximum page size is 100.
+ int32 page_size = 5;
+}
+
+// Response message for the ListJobs method.
+message ListJobsResponse {
+ // The list of jobs.
+ repeated Job jobs = 1;
+
+ // Optional. Pass this token as the `page_token` field of the request for a
+ // subsequent call.
+ string next_page_token = 2;
+}
+
+// Request message for the GetJob method.
+message GetJobRequest {
+ // Required. The name of the job to get the description of.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the CancelJob method.
+message CancelJobRequest {
+ // Required. The name of the job to cancel.
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1beta1/model_service.proto b/third_party/googleapis/google/cloud/ml/v1beta1/model_service.proto
new file mode 100644
index 0000000000..08b0d37a54
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1beta1/model_service.proto
@@ -0,0 +1,371 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1beta1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "ModelServiceProto";
+option java_package = "com.google.cloud.ml.api.v1beta1";
+
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the 'models service' to work with the 'model' and 'version'
+// resources.
+
+
+
+// Provides methods that create and manage machine learning models and their
+// versions.
+//
+// A model in this context is a container for versions. The model can't provide
+// predictions without first having a version created for it.
+//
+// Each version is a trained machine learning model, and each is assumed to be
+// an iteration of the same machine learning problem as the other versions of
+// the same model.
+//
+// Your project can define multiple models, each with multiple versions.
+//
+// The basic life cycle of a model is:
+//
+// * Create and train the machine learning model and save it to a
+// Google Cloud Storage location.
+// * Use
+// [projects.models.create](/ml/reference/rest/v1beta1/projects.models/create)
+// to make a new model in your project.
+// * Use
+// [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create)
+// to deploy your saved model.
+// * Use [projects.predict](/ml/reference/rest/v1beta1/projects/predict) to
+// request predictions of a version of your model, or use
+// [projects.jobs.create](/ml/reference/rest/v1beta1/projects.jobs/create)
+// to start a batch prediction job.
+service ModelService {
+ // Creates a model which will later contain one or more versions.
+ //
+ // You must add at least one version before you can request predictions from
+ // the model. Add versions by calling
+ // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create).
+ rpc CreateModel(CreateModelRequest) returns (Model) {
+ option (google.api.http) = { post: "/v1beta1/{parent=projects/*}/models" body: "model" };
+ }
+
+ // Lists the models in a project.
+ //
+ // Each project can contain multiple models, and each model can have multiple
+ // versions.
+ rpc ListModels(ListModelsRequest) returns (ListModelsResponse) {
+ option (google.api.http) = { get: "/v1beta1/{parent=projects/*}/models" };
+ }
+
+ // Gets information about a model, including its name, the description (if
+ // set), and the default version (if at least one version of the model has
+ // been deployed).
+ rpc GetModel(GetModelRequest) returns (Model) {
+ option (google.api.http) = { get: "/v1beta1/{name=projects/*/models/*}" };
+ }
+
+ // Deletes a model.
+ //
+ // You can only delete a model if there are no versions in it. You can delete
+ // versions by calling
+ // [projects.models.versions.delete](/ml/reference/rest/v1beta1/projects.models.versions/delete).
+ rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1beta1/{name=projects/*/models/*}" };
+ }
+
+ // Creates a new version of a model from a trained TensorFlow model.
+ //
+ // If the version created in the cloud by this call is the first deployed
+ // version of the specified model, it will be made the default version of the
+ // model. When you add a version to a model that already has one or more
+ // versions, the default version does not automatically change. If you want a
+ // new version to be the default, you must call
+ // [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).
+ rpc CreateVersion(CreateVersionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1beta1/{parent=projects/*/models/*}/versions" body: "version" };
+ }
+
+ // Gets basic information about all the versions of a model.
+ //
+ // If you expect that a model has a lot of versions, or if you need to handle
+ // only a limited number of results at a time, you can request that the list
+ // be retrieved in batches (called pages).
+ rpc ListVersions(ListVersionsRequest) returns (ListVersionsResponse) {
+ option (google.api.http) = { get: "/v1beta1/{parent=projects/*/models/*}/versions" };
+ }
+
+ // Gets information about a model version.
+ //
+ // Models can have multiple versions. You can call
+ // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list)
+ // to get the same information that this method returns for all of the
+ // versions of a model.
+ rpc GetVersion(GetVersionRequest) returns (Version) {
+ option (google.api.http) = { get: "/v1beta1/{name=projects/*/models/*/versions/*}" };
+ }
+
+ // Deletes a model version.
+ //
+ // Each model can have multiple versions deployed and in use at any given
+ // time. Use this method to remove a single version.
+ //
+ // Note: You cannot delete the version that is set as the default version
+ // of the model unless it is the only remaining version.
+ rpc DeleteVersion(DeleteVersionRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { delete: "/v1beta1/{name=projects/*/models/*/versions/*}" };
+ }
+
+ // Designates a version to be the default for the model.
+ //
+ // The default version is used for prediction requests made against the model
+ // that don't specify a version.
+ //
+ // The first version to be created for a model is automatically set as the
+ // default. You must make any subsequent changes to the default version
+ // setting manually using this method.
+ rpc SetDefaultVersion(SetDefaultVersionRequest) returns (Version) {
+ option (google.api.http) = { post: "/v1beta1/{name=projects/*/models/*/versions/*}:setDefault" body: "*" };
+ }
+}
+
+// Represents a machine learning solution.
+//
+// A model can have multiple versions, each of which is a deployed, trained
+// model ready to receive prediction requests. The model itself is just a
+// container.
+message Model {
+ // Required. The name specified for the model when it was created.
+ //
+ // The model name must be unique within the project it is created in.
+ string name = 1;
+
+ // Optional. The description specified for the model when it was created.
+ string description = 2;
+
+ // Output only. The default version of the model. This version will be used to
+ // handle prediction requests that do not specify a version.
+ //
+ // You can change the default version by calling
+ // [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).
+ Version default_version = 3;
+
+ // Optional. The list of regions where the model is going to be deployed.
+ // Currently only one region per model is supported.
+ // Defaults to 'us-central1' if nothing is set.
+ repeated string regions = 4;
+
+ // Optional. If true, enables Stackdriver Logging for online prediction.
+ // Default is false.
+ bool online_prediction_logging = 5;
+}
+
+// Represents a version of the model.
+//
+// Each version is a trained model deployed in the cloud, ready to handle
+// prediction requests. A model can have multiple versions. You can get
+// information about all of the versions of a given model by calling
+// [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).
+message Version {
+ // Required. The name specified for the version when it was created.
+ //
+ // The version name must be unique within the model it is created in.
+ string name = 1;
+
+ // Optional. The description specified for the version when it was created.
+ string description = 2;
+
+ // Output only. If true, this version will be used to handle prediction
+ // requests that do not specify a version.
+ //
+ // You can change the default version by calling
+ // [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).
+ bool is_default = 3;
+
+ // Required. The Google Cloud Storage location of the trained model used to
+ // create the version. See the
+ // [overview of model deployment](/ml/docs/concepts/deployment-overview) for
+ // more information.
+ //
+ // When passing Version to
+ // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create)
+ // the model service uses the specified location as the source of the model.
+ // Once deployed, the model version is hosted by the prediction service, so
+ // this location is useful only as a historical record.
+ string deployment_uri = 4;
+
+ // Output only. The time the version was created.
+ google.protobuf.Timestamp create_time = 5;
+
+ // Output only. The time the version was last used for prediction.
+ google.protobuf.Timestamp last_use_time = 6;
+
+ // Optional. The Google Cloud ML runtime version to use for this deployment.
+ // If not set, Google Cloud ML will choose a version.
+ string runtime_version = 8;
+
+ // Optional. Manually select the number of nodes to use for serving the
+ // model. If unset (i.e., by default), the number of nodes used to serve
+ // the model automatically scales with traffic. However, care should be
+ // taken to ramp up traffic according to the model's ability to scale. If
+ // your model needs to handle bursts of traffic beyond its ability to
+ // scale, it is recommended that you set this field appropriately.
+ ManualScaling manual_scaling = 9;
+}
+
+// Options for manually scaling a model.
+message ManualScaling {
+ // The number of nodes to allocate for this model. These nodes are always up,
+ // starting from the time the model is deployed, so the cost of operating
+ // this model will be proportional to nodes * number of hours since
+ // deployment.
+ int32 nodes = 1;
+}
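+
+// A minimal, illustrative example of a Version resource as it might be passed
+// to projects.models.versions.create, in proto3 JSON form. The bucket path,
+// version name, and runtime version below are hypothetical placeholders:
+// <pre>
+// {
+//   "name": "v1",
+//   "description": "First deployed iteration of the model.",
+//   "deploymentUri": "gs://my-bucket/exported-model/",
+//   "runtimeVersion": "1.0",
+//   "manualScaling": {"nodes": 2}
+// }
+// </pre>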
+
+// Request message for the CreateModel method.
+message CreateModelRequest {
+ // Required. The project name.
+ //
+ // Authorization: requires `Editor` role on the specified project.
+ string parent = 1;
+
+ // Required. The model to create.
+ Model model = 2;
+}
+
+// Request message for the ListModels method.
+message ListModelsRequest {
+ // Required. The name of the project whose models are to be listed.
+ //
+ // Authorization: requires `Viewer` role on the specified project.
+ string parent = 1;
+
+ // Optional. A page token to request the next page of results.
+ //
+ // You get the token from the `next_page_token` field of the response from
+ // the previous call.
+ string page_token = 4;
+
+ // Optional. The number of models to retrieve per "page" of results. If there
+ // are more remaining results than this number, the response message will
+ // contain a valid value in the `next_page_token` field.
+ //
+ // The default value is 20, and the maximum page size is 100.
+ int32 page_size = 5;
+}
+
+// Response message for the ListModels method.
+message ListModelsResponse {
+ // The list of models.
+ repeated Model models = 1;
+
+ // Optional. Pass this token as the `page_token` field of the request for a
+ // subsequent call.
+ string next_page_token = 2;
+}
+
+// Request message for the GetModel method.
+message GetModelRequest {
+ // Required. The name of the model.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the DeleteModel method.
+message DeleteModelRequest {
+ // Required. The name of the model.
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
+
+// Uploads the provided trained model version to Cloud Machine Learning.
+message CreateVersionRequest {
+ // Required. The name of the model.
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string parent = 1;
+
+ // Required. The version details.
+ Version version = 2;
+}
+
+// Request message for the ListVersions method.
+message ListVersionsRequest {
+ // Required. The name of the model for which to list versions.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string parent = 1;
+
+ // Optional. A page token to request the next page of results.
+ //
+ // You get the token from the `next_page_token` field of the response from
+ // the previous call.
+ string page_token = 4;
+
+ // Optional. The number of versions to retrieve per "page" of results. If
+ // there are more remaining results than this number, the response message
+ // will contain a valid value in the `next_page_token` field.
+ //
+ // The default value is 20, and the maximum page size is 100.
+ int32 page_size = 5;
+}
+
+// Response message for the ListVersions method.
+message ListVersionsResponse {
+ // The list of versions.
+ repeated Version versions = 1;
+
+ // Optional. Pass this token as the `page_token` field of the request for a
+ // subsequent call.
+ string next_page_token = 2;
+}
+
+// Request message for the GetVersion method.
+message GetVersionRequest {
+ // Required. The name of the version.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the DeleteVersion method.
+message DeleteVersionRequest {
+ // Required. The name of the version. You can get the names of all the
+ // versions of a model by calling
+ // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
+
+// Request message for the SetDefaultVersion method.
+message SetDefaultVersionRequest {
+ // Required. The name of the version to make the default for the model. You
+ // can get the names of all the versions of a model by calling
+ // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).
+ //
+ // Authorization: requires `Editor` role on the parent project.
+ string name = 1;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1beta1/operation_metadata.proto b/third_party/googleapis/google/cloud/ml/v1beta1/operation_metadata.proto
new file mode 100644
index 0000000000..a72647191c
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1beta1/operation_metadata.proto
@@ -0,0 +1,72 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/ml/v1beta1/model_service.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1beta1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "OperationMetadataProto";
+option java_package = "com.google.cloud.ml.api.v1beta1";
+
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the metadata for longrunning operations.
+
+
+
+// Represents the metadata of the long-running operation.
+message OperationMetadata {
+ // The operation type.
+ enum OperationType {
+ // Unspecified operation type.
+ OPERATION_TYPE_UNSPECIFIED = 0;
+
+ // An operation to create a new version.
+ CREATE_VERSION = 1;
+
+ // An operation to delete an existing version.
+ DELETE_VERSION = 2;
+
+ // An operation to delete an existing model.
+ DELETE_MODEL = 3;
+ }
+
+ // The time the operation was submitted.
+ google.protobuf.Timestamp create_time = 1;
+
+ // The time operation processing started.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time operation processing completed.
+ google.protobuf.Timestamp end_time = 3;
+
+ // Indicates whether a request to cancel this operation has been made.
+ bool is_cancellation_requested = 4;
+
+ // The operation type.
+ OperationType operation_type = 5;
+
+ // Contains the name of the model associated with the operation.
+ string model_name = 6;
+
+ // Contains the version associated with the operation.
+ Version version = 7;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1beta1/prediction_service.proto b/third_party/googleapis/google/cloud/ml/v1beta1/prediction_service.proto
new file mode 100644
index 0000000000..6374ca2255
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1beta1/prediction_service.proto
@@ -0,0 +1,240 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/httpbody.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1beta1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "PredictionServiceProto";
+option java_package = "com.google.cloud.ml.api.v1beta1";
+
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the online prediction service.
+
+
+
+// The Prediction API, which serves predictions for models managed by
+// ModelService.
+service OnlinePredictionService {
+ // Performs prediction on the data in the request.
+ //
+ // **** REMOVE FROM GENERATED DOCUMENTATION
+ rpc Predict(PredictRequest) returns (google.api.HttpBody) {
+ option (google.api.http) = { post: "/v1beta1/{name=projects/**}:predict" body: "*" };
+ }
+}
+
+// Request for predictions to be issued against a trained model.
+//
+// The body of the request is a single JSON object with a single top-level
+// field:
+//
+// <dl>
+// <dt>instances</dt>
+// <dd>A JSON array containing values representing the instances to use for
+// prediction.</dd>
+// </dl>
+//
+// The structure of each element of the instances list is determined by your
+// model's input definition. Instances can include named inputs or can contain
+// only unlabeled values.
+//
+// Not all data includes named inputs. Some instances will be simple
+// JSON values (boolean, number, or string). However, instances are often lists
+// of simple values, or complex nested lists. Here are some examples of request
+// bodies:
+//
+// CSV data with each row encoded as a string value:
+// <pre>
+// {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]}
+// </pre>
+// Plain text:
+// <pre>
+// {"instances": ["the quick brown fox", "la bruja le dio"]}
+// </pre>
+// Sentences encoded as lists of words (vectors of strings):
+// <pre>
+// {
+// "instances": [
+// ["the","quick","brown"],
+// ["la","bruja","le"],
+// ...
+// ]
+// }
+// </pre>
+// Floating point scalar values:
+// <pre>
+// {"instances": [0.0, 1.1, 2.2]}
+// </pre>
+// Vectors of integers:
+// <pre>
+// {
+// "instances": [
+// [0, 1, 2],
+// [3, 4, 5],
+// ...
+// ]
+// }
+// </pre>
+// Tensors (in this case, two-dimensional tensors):
+// <pre>
+// {
+// "instances": [
+// [
+// [0, 1, 2],
+// [3, 4, 5]
+// ],
+// ...
+// ]
+// }
+// </pre>
+// Images can be represented in different ways. In this encoding scheme, the first
+// two dimensions represent the rows and columns of the image, and the third
+// contains lists (vectors) of the R, G, and B values for each pixel.
+// <pre>
+// {
+// "instances": [
+// [
+// [
+// [138, 30, 66],
+// [130, 20, 56],
+// ...
+// ],
+// [
+// [126, 38, 61],
+// [122, 24, 57],
+// ...
+// ],
+// ...
+// ],
+// ...
+// ]
+// }
+// </pre>
+// JSON strings must be encoded as UTF-8. To send binary data, you must
+// base64-encode the data and mark it as binary. To mark a JSON string
+// as binary, replace it with a JSON object with a single attribute named `b64`:
+// <pre>{"b64": "..."} </pre>
+// For example:
+//
+// Two serialized tf.Examples (fake data, for illustrative purposes only):
+// <pre>
+// {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]}
+// </pre>
+// Two JPEG image byte strings (fake data, for illustrative purposes only):
+// <pre>
+// {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]}
+// </pre>
+// If your data includes named references, format each instance as a JSON object
+// with the named references as the keys:
+//
+// JSON input data to be preprocessed:
+// <pre>
+// {
+// "instances": [
+// {
+// "a": 1.0,
+// "b": true,
+// "c": "x"
+// },
+// {
+// "a": -2.0,
+// "b": false,
+// "c": "y"
+// }
+// ]
+// }
+// </pre>
+// Some models have an underlying TensorFlow graph that accepts multiple input
+// tensors. In this case, you should use the names of JSON name/value pairs to
+// identify the input tensors, as shown in the following examples:
+//
+// For a graph with input tensor aliases "tag" (string) and "image"
+// (base64-encoded string):
+// <pre>
+// {
+// "instances": [
+// {
+// "tag": "beach",
+// "image": {"b64": "ASa8asdf"}
+// },
+// {
+// "tag": "car",
+// "image": {"b64": "JLK7ljk3"}
+// }
+// ]
+// }
+// </pre>
+// For a graph with input tensor aliases "tag" (string) and "image"
+// (3-dimensional array of 8-bit ints):
+// <pre>
+// {
+// "instances": [
+// {
+// "tag": "beach",
+// "image": [
+// [
+// [138, 30, 66],
+// [130, 20, 56],
+// ...
+// ],
+// [
+// [126, 38, 61],
+// [122, 24, 57],
+// ...
+// ],
+// ...
+// ]
+// },
+// {
+// "tag": "car",
+// "image": [
+// [
+// [255, 0, 102],
+// [255, 0, 97],
+// ...
+// ],
+// [
+// [254, 1, 101],
+// [254, 2, 93],
+// ...
+// ],
+// ...
+// ]
+// },
+// ...
+// ]
+// }
+// </pre>
+// If the call is successful, the response body will contain one prediction
+// entry per instance in the request body. If prediction fails for any
+// instance, the response body will contain no predictions and will contain
+// a single error entry instead.
+message PredictRequest {
+ // Required. The resource name of a model or a version.
+ //
+ // Authorization: requires `Viewer` role on the parent project.
+ string name = 1;
+
+ // Required. The prediction request body.
+ google.api.HttpBody http_body = 2;
+}
diff --git a/third_party/googleapis/google/cloud/ml/v1beta1/project_service.proto b/third_party/googleapis/google/cloud/ml/v1beta1/project_service.proto
new file mode 100644
index 0000000000..1f8b2d5e0a
--- /dev/null
+++ b/third_party/googleapis/google/cloud/ml/v1beta1/project_service.proto
@@ -0,0 +1,59 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.ml.v1beta1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1beta1;ml";
+option java_multiple_files = true;
+option java_outer_classname = "ProjectServiceProto";
+option java_package = "com.google.cloud.ml.api.v1beta1";
+
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Proto file for the Google Cloud Machine Learning Engine.
+// Describes the project management service.
+
+
+
+// Allows retrieving project related information.
+service ProjectManagementService {
+ // Gets the service account information associated with your project. You need
+ // this information in order to grant the service account permissions for
+ // the Google Cloud Storage location where you put your model training code
+ // for training the model with Google Cloud Machine Learning.
+ rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) {
+ option (google.api.http) = { get: "/v1beta1/{name=projects/*}:getConfig" };
+ }
+}
+
+// Requests service account information associated with a project.
+message GetConfigRequest {
+ // Required. The project name.
+ //
+ // Authorization: requires `Viewer` role on the specified project.
+ string name = 1;
+}
+
+// Returns service account information associated with a project.
+message GetConfigResponse {
+ // The service account Cloud ML uses to access resources in the project.
+ string service_account = 1;
+
+ // The project number for `service_account`.
+ int64 service_account_project = 2;
+}
diff --git a/third_party/googleapis/google/cloud/runtimeconfig/README.md b/third_party/googleapis/google/cloud/runtimeconfig/README.md
new file mode 100644
index 0000000000..49c1d8717f
--- /dev/null
+++ b/third_party/googleapis/google/cloud/runtimeconfig/README.md
@@ -0,0 +1,39 @@
+[TOC]
+
+# Introduction
+
+The RuntimeConfig service provides Google Cloud Platform users the ability to
+dynamically configure their services.
+
+The RuntimeConfig service creates and manages RuntimeConfig resources
+within a Google Cloud project and the variables within those resources.
+
+## Details
+
+Each cloud project can create multiple **Config** objects. A **Config** object
+by itself does not contain any configuration information, but rather is a
+logical grouping of variables. Variable names are hierarchical and follow file
+system style, where only leaf nodes can contain values.
+
+For example, you can have a configuration called *Flags*. Within that
+configuration object, you can create the following variables.
+
+* `/ports/service_port`
+* `/ports/monitoring_port`
+* `/ports/admin_port`
+
+This creates three variables: `/ports/service_port`, `/ports/monitoring_port`,
+and `/ports/admin_port`. Note that `/ports` cannot have a value, but it can be
+listed.
+
+### Setup
+
+In order to make requests to the RuntimeConfig service, you need to enable the
+API for your project.
+
+To do so, go to the
+[Google Cloud Console](https://console.cloud.google.com/apis/api/runtimeconfig.googleapis.com/overview)
+and enable the *Google Cloud RuntimeConfig API* for your project.
+
+The documentation for this service is located
+[here](https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/).
diff --git a/third_party/googleapis/google/cloud/runtimeconfig/v1beta1/resources.proto b/third_party/googleapis/google/cloud/runtimeconfig/v1beta1/resources.proto
new file mode 100644
index 0000000000..908722a0ee
--- /dev/null
+++ b/third_party/googleapis/google/cloud/runtimeconfig/v1beta1/resources.proto
@@ -0,0 +1,208 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.runtimeconfig.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.Cloud.RuntimeConfig.V1Beta1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1;runtimeconfig";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.runtimeconfig.v1beta1";
+
+
+// A RuntimeConfig resource is the primary resource in the Cloud RuntimeConfig
+// service. A RuntimeConfig resource consists of metadata and a hierarchy of
+// variables.
+message RuntimeConfig {
+ // The resource name of a runtime config. The name must have the format:
+ //
+ // projects/[PROJECT_ID]/configs/[CONFIG_NAME]
+ //
+ // The `[PROJECT_ID]` must be a valid project ID, and `[CONFIG_NAME]` is an
+ // arbitrary name that matches RFC 1035 segment specification. The length of
+ // `[CONFIG_NAME]` must be less than 64 bytes.
+ //
+ // You pick the RuntimeConfig resource name, but the server will validate that
+ // the name adheres to this format. After you create the resource, you cannot
+ // change the resource's name.
+ string name = 1;
+
+ // An optional description of the RuntimeConfig object.
+ string description = 2;
+}
+
+// Describes a single variable within a RuntimeConfig resource.
+// The name denotes the hierarchical variable name. For example,
+// `ports/serving_port` is a valid variable name. The variable value is an
+// opaque string and only leaf variables can have values (that is, variables
+// that do not have any child variables).
+message Variable {
+ // The name of the variable resource, in the format:
+ //
+ // projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]
+ //
+ // The `[PROJECT_ID]` must be a valid project ID, `[CONFIG_NAME]` must be a
+ // valid RuntimeConfig resource, and `[VARIABLE_NAME]` follows Unix file system
+ // file path naming.
+ //
+ // The `[VARIABLE_NAME]` can contain ASCII letters, numbers, slashes and
+ // dashes. Slashes are used as path element separators and are not part of the
+ // `[VARIABLE_NAME]` itself, so `[VARIABLE_NAME]` must contain at least one
+ // non-slash character. Multiple slashes are coalesced into a single slash
+ // character. Each path segment should follow RFC 1035 segment specification.
+ // The length of a `[VARIABLE_NAME]` must be less than 256 bytes.
+ //
+ // Once you create a variable, you cannot change the variable name.
+ string name = 1;
+
+ // The value of the variable. It can be either a binary or a string
+ // value. You must specify one of either `value` or `text`. Specifying both
+ // will cause the server to return an error.
+ oneof contents {
+ // The binary value of the variable. The length of the value must be less
+ // than 4096 bytes. Empty values are also accepted. The value must be
+ // base64 encoded. Only one of `value` or `text` can be set.
+ bytes value = 2;
+
+ // The string value of the variable. The length of the value must be less
+ // than 4096 bytes. Empty values are also accepted. For example,
+ // `text: "my text value"`. The string must be valid UTF-8.
+ string text = 5;
+ }
+
+ // [Output Only] The time of the last variable update.
+ google.protobuf.Timestamp update_time = 3;
+
+ // [Output Only] The current state of the variable. The variable state indicates
+ // the outcome of the `variables().watch` call and is visible through the
+ // `get` and `list` calls.
+ VariableState state = 4;
+}
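+
+// A minimal, illustrative example of a Variable resource in proto3 JSON form,
+// using hypothetical project, config, and variable names:
+// <pre>
+// {
+//   "name": "projects/my-project/configs/my-config/variables/ports/serving_port",
+//   "text": "8080",
+//   "updateTime": "2017-05-30T15:49:37Z",
+//   "state": "UPDATED"
+// }
+// </pre>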
+
+// The condition that a Waiter resource is waiting for.
+message EndCondition {
+ // A Cardinality condition for the Waiter resource. A cardinality condition is
+ // met when the number of variables under a specified path prefix reaches a
+ // predefined number. For example, if you set a Cardinality condition where
+ // the `path` is set to `/foo` and the number of paths is set to 2, the
+ // following variables would meet the condition in a RuntimeConfig resource:
+ //
+ // + `/foo/variable1 = "value1"`
+ // + `/foo/variable2 = "value2"`
+ // + `/bar/variable3 = "value3"`
+ //
+ // It would not satisfy the same condition with `number` set to
+ // 3, however, because only two of the paths start with `/foo`.
+ // Cardinality conditions are recursive; all subtrees under the specific
+ // path prefix are counted.
+ message Cardinality {
+ // The root of the variable subtree to monitor. For example, `/foo`.
+ string path = 1;
+
+ // The number of variables under the `path` that must exist to meet this
+ // condition. Defaults to 1 if not specified.
+ int32 number = 2;
+ }
+
+ // The condition oneof holds the available condition types for this
+ // EndCondition. Currently, the only available type is Cardinality.
+ oneof condition {
+ // The cardinality of the `EndCondition`.
+ Cardinality cardinality = 1;
+ }
+}
+
+// A Waiter resource waits for some end condition within a RuntimeConfig resource
+// to be met before it returns. For example, assume you have a distributed
+// system where each node writes to a Variable resource indicating the node's
+// readiness as part of the startup process.
+//
+// You then configure a Waiter resource with the success condition set to wait
+// until some number of nodes have checked in. Your application then runs some
+// arbitrary code once the condition has been met and the waiter returns
+// successfully.
+//
+// Once created, a Waiter resource is immutable.
+//
+// To learn more about using waiters, read the
+// [Creating a Waiter](/deployment-manager/runtime-configurator/creating-a-waiter)
+// documentation.
+message Waiter {
+ // The name of the Waiter resource, in the format:
+ //
+ // projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]
+ //
+ // The `[PROJECT_ID]` must be a valid Google Cloud project ID,
+ // the `[CONFIG_NAME]` must be a valid RuntimeConfig resource, the
+ // `[WAITER_NAME]` must match RFC 1035 segment specification, and the length
+ // of `[WAITER_NAME]` must be less than 64 bytes.
+ //
+ // After you create a Waiter resource, you cannot change the resource name.
+ string name = 1;
+
+ // [Required] Specifies the timeout of the waiter in seconds, beginning from
+ // the instant that `waiters().create` method is called. If this time elapses
+ // before the success or failure conditions are met, the waiter fails and sets
+ // the `error` code to `DEADLINE_EXCEEDED`.
+ google.protobuf.Duration timeout = 2;
+
+ // [Optional] The failure condition of this waiter. If this condition is met,
+ // `done` will be set to `true` and the `error` code will be set to `ABORTED`.
+ // The failure condition takes precedence over the success condition. If both
+ // conditions are met, a failure will be indicated. This value is optional; if
+ // no failure condition is set, the only failure scenario will be a timeout.
+ EndCondition failure = 3;
+
+ // [Required] The success condition. If this condition is met, `done` will be
+ // set to `true` and the `error` value will remain unset. The failure condition
+ // takes precedence over the success condition. If both conditions are met, a
+ // failure will be indicated.
+ EndCondition success = 4;
+
+ // [Output Only] The instant at which this Waiter resource was created. Adding
+ // the value of `timeout` to this instant yields the timeout deadline for the
+ // waiter.
+ google.protobuf.Timestamp create_time = 5;
+
+ // [Output Only] If the value is `false`, it means the waiter is still waiting
+ // for one of its conditions to be met.
+ //
+ // If true, the waiter has finished. If the waiter finished due to a timeout
+ // or failure, `error` will be set.
+ bool done = 6;
+
+ // [Output Only] If the waiter ended due to a failure or timeout, this value
+ // will be set.
+ google.rpc.Status error = 7;
+}
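+
+// A minimal, illustrative example of a Waiter resource that waits for two
+// variables to appear under `/status/ready`, in proto3 JSON form. All names
+// and values below are hypothetical placeholders:
+// <pre>
+// {
+//   "name": "projects/my-project/configs/my-config/waiters/startup-waiter",
+//   "timeout": "600s",
+//   "success": {"cardinality": {"path": "/status/ready", "number": 2}},
+//   "done": false
+// }
+// </pre>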
+
+// The `VariableState` describes the last known state of the variable and is
+// used during a `variables().watch` call to distinguish the state of the
+// variable.
+enum VariableState {
+ // Default variable state.
+ VARIABLE_STATE_UNSPECIFIED = 0;
+
+ // The variable was updated, while `variables().watch` was executing.
+ UPDATED = 1;
+
+ // The variable was deleted, while `variables().watch` was executing.
+ DELETED = 2;
+}
diff --git a/third_party/googleapis/google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto b/third_party/googleapis/google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto
new file mode 100644
index 0000000000..4ebcfadf2f
--- /dev/null
+++ b/third_party/googleapis/google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto
@@ -0,0 +1,410 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.runtimeconfig.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/runtimeconfig/v1beta1/resources.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.RuntimeConfig.V1Beta1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1;runtimeconfig";
+option java_multiple_files = true;
+option java_package = "com.google.cloud.runtimeconfig.v1beta1";
+
+
+// The RuntimeConfig API represents configuration objects and operations on
+// those configuration objects.
+// RuntimeConfig objects consist of Variables logically grouped within those
+// objects.
+// Variables are simple key-value pairs. Variables can be watched for changes or
+// deletions. Variable names are hierarchical, e.g. ports/serving_port,
+// ports/monitoring_port, etc. No variable name can be a prefix of another.
+// Config objects represent logical containers for variables, e.g. flags,
+// passwords, etc.
+service RuntimeConfigManager {
+ // Lists all the RuntimeConfig resources within a project.
+ rpc ListConfigs(ListConfigsRequest) returns (ListConfigsResponse) {
+ option (google.api.http) = { get: "/v1beta1/{parent=projects/*}/configs" };
+ }
+
+ // Gets information about a RuntimeConfig resource.
+ rpc GetConfig(GetConfigRequest) returns (RuntimeConfig) {
+ option (google.api.http) = { get: "/v1beta1/{name=projects/*/configs/*}" };
+ }
+
+ // Creates a new RuntimeConfig resource. The configuration name must be
+ // unique within the project.
+ rpc CreateConfig(CreateConfigRequest) returns (RuntimeConfig) {
+ option (google.api.http) = { post: "/v1beta1/{parent=projects/*}/configs" body: "config" };
+ }
+
+ // Updates a RuntimeConfig resource. The configuration must exist beforehand.
+ rpc UpdateConfig(UpdateConfigRequest) returns (RuntimeConfig) {
+ option (google.api.http) = { put: "/v1beta1/{name=projects/*/configs/*}" body: "config" };
+ }
+
+ // Deletes a RuntimeConfig resource.
+ rpc DeleteConfig(DeleteConfigRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1beta1/{name=projects/*/configs/*}" };
+ }
+
+ // Lists variables within a given configuration, matching any provided filters.
+ // This only lists variable names, not the values, unless `return_values` is
+ // true, in which case only variables that the user has IAM permission to
+ // GetVariable will be returned.
+ rpc ListVariables(ListVariablesRequest) returns (ListVariablesResponse) {
+ option (google.api.http) = { get: "/v1beta1/{parent=projects/*/configs/*}/variables" };
+ }
+
+ // Gets information about a single variable.
+ rpc GetVariable(GetVariableRequest) returns (Variable) {
+ option (google.api.http) = { get: "/v1beta1/{name=projects/*/configs/*/variables/**}" };
+ }
+
+ // Watches a specific variable and waits for a change in the variable's value.
+ // When there is a change, this method returns the new value or times out.
+ //
+ // If a variable is deleted while being watched, the `variableState` state is
+ // set to `DELETED` and the method returns the last known variable `value`.
+ //
+ // If you set the deadline for watching to a larger value than the internal timeout
+ // (60 seconds), the current variable value is returned and the `variableState`
+ // will be `VARIABLE_STATE_UNSPECIFIED`.
+ //
+ // To learn more about creating a watcher, read the
+ // [Watching a Variable for Changes](/deployment-manager/runtime-configurator/watching-a-variable)
+ // documentation.
+ rpc WatchVariable(WatchVariableRequest) returns (Variable) {
+ option (google.api.http) = { post: "/v1beta1/{name=projects/*/configs/*/variables/**}:watch" body: "*" };
+ }
+
+ // Creates a variable within the given configuration. You cannot create
+ // a variable with a name that is a prefix of an existing variable name, or a
+ // name that has an existing variable name as a prefix.
+ //
+ // To learn more about creating a variable, read the
+ // [Setting and Getting Data](/deployment-manager/runtime-configurator/set-and-get-variables)
+ // documentation.
+ rpc CreateVariable(CreateVariableRequest) returns (Variable) {
+ option (google.api.http) = { post: "/v1beta1/{parent=projects/*/configs/*}/variables" body: "variable" };
+ }
+
+ // Updates an existing variable with a new value.
+ rpc UpdateVariable(UpdateVariableRequest) returns (Variable) {
+ option (google.api.http) = { put: "/v1beta1/{name=projects/*/configs/*/variables/**}" body: "variable" };
+ }
+
+ // Deletes a variable or multiple variables.
+ //
+ // If you specify a variable name, then that variable is deleted. If you
+ // specify a prefix and `recursive` is true, then all variables with that
+ // prefix are deleted. You must set `recursive` to true if you delete
+ // variables by prefix.
+ rpc DeleteVariable(DeleteVariableRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1beta1/{name=projects/*/configs/*/variables/**}" };
+ }
+
+ // List waiters within the given configuration.
+ rpc ListWaiters(ListWaitersRequest) returns (ListWaitersResponse) {
+ option (google.api.http) = { get: "/v1beta1/{parent=projects/*/configs/*}/waiters" };
+ }
+
+ // Gets information about a single waiter.
+ rpc GetWaiter(GetWaiterRequest) returns (Waiter) {
+ option (google.api.http) = { get: "/v1beta1/{name=projects/*/configs/*/waiters/*}" };
+ }
+
+ // Creates a Waiter resource. This operation returns a long-running Operation
+ // resource which can be polled for completion. However, a waiter with the
+ // given name will exist (and can be retrieved) prior to the operation
+ // completing. If the operation fails, the failed Waiter resource will
+ // still exist and must be deleted prior to subsequent creation attempts.
+ rpc CreateWaiter(CreateWaiterRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1beta1/{parent=projects/*/configs/*}/waiters" body: "waiter" };
+ }
+
+ // Deletes the waiter with the specified name.
+ rpc DeleteWaiter(DeleteWaiterRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1beta1/{name=projects/*/configs/*/waiters/*}" };
+ }
+}
+
+// Request for the `ListConfigs()` method.
+message ListConfigsRequest {
+ // The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848)
+ // for this request, in the format `projects/[PROJECT_ID]`.
+ string parent = 1;
+
+ // Specifies the number of results to return per page. If there are fewer
+ // elements than the specified number, returns all elements.
+ int32 page_size = 2;
+
+ // Specifies a page token to use. Set `pageToken` to a `nextPageToken`
+ // returned by a previous list request to get the next page of results.
+ string page_token = 3;
+}
+
+// `ListConfigs()` returns the following response. The order of returned
+// objects is arbitrary; that is, it is not ordered in any particular way.
+message ListConfigsResponse {
+ // A list of the configurations in the project. The order of returned
+ // objects is arbitrary; that is, it is not ordered in any particular way.
+ repeated RuntimeConfig configs = 1;
+
+ // This token allows you to get the next page of results for list requests.
+ // If the number of results is larger than `pageSize`, use the `nextPageToken`
+ // as a value for the query parameter `pageToken` in the next list request.
+ // Subsequent list requests will have their own `nextPageToken` to continue
+ // paging through the results.
+ string next_page_token = 2;
+}
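+
+// A minimal, illustrative sketch of the pagination flow, using a hypothetical
+// project, config, and token value: a first `ListConfigs()` call with
+// `pageSize: 1` might return
+// <pre>
+// {
+//   "configs": [{"name": "projects/my-project/configs/my-config"}],
+//   "nextPageToken": "token-for-next-page"
+// }
+// </pre>
+// and passing that token as `pageToken` in the next request returns the next
+// page of configurations; a response with an empty `nextPageToken` indicates
+// the last page.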
+
+// Gets a RuntimeConfig resource.
+message GetConfigRequest {
+ // The name of the RuntimeConfig resource to retrieve, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`
+ string name = 2;
+}
+
+// Creates a RuntimeConfig resource.
+message CreateConfigRequest {
+ // The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848)
+ // for this request, in the format `projects/[PROJECT_ID]`.
+ string parent = 1;
+
+ // The RuntimeConfig to create.
+ RuntimeConfig config = 2;
+
+ // An optional but recommended unique `request_id`. If the server
+ // receives two `create()` requests with the same
+ // `request_id`, then the second request will be ignored and the
+ // first resource created and stored in the backend is returned.
+ // Empty `request_id` fields are ignored.
+ //
+ // It is the responsibility of the client to ensure uniqueness of the
+ // `request_id` strings.
+ //
+ // `request_id` strings are limited to 64 characters.
+ string request_id = 3;
+}
+
+// Request message for `UpdateConfig()` method.
+message UpdateConfigRequest {
+ // The name of the RuntimeConfig resource to update, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`
+ string name = 1;
+
+ // The config resource to update.
+ RuntimeConfig config = 2;
+}
+
+// Request for the `DeleteConfig()` method.
+message DeleteConfigRequest {
+ // The RuntimeConfig resource to delete, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`
+ string name = 1;
+}
+
+// Request for the `ListVariables()` method.
+message ListVariablesRequest {
+ // The path to the RuntimeConfig resource for which you want to list variables.
+ // The configuration must exist beforehand; the path must be in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`
+ string parent = 1;
+
+ // Filters variables by matching the specified filter. For example:
+ //
+ // `projects/example-project/configs/[CONFIG_NAME]/variables/example-variable`.
+ string filter = 2;
+
+ // Specifies the number of results to return per page. If there are fewer
+ // elements than the specified number, returns all elements.
+ int32 page_size = 3;
+
+ // Specifies a page token to use. Set `pageToken` to a `nextPageToken`
+ // returned by a previous list request to get the next page of results.
+ string page_token = 4;
+
+ // This flag indicates whether the user wants the values of the variables to be
+ // returned. If true, then only those variables for which the user has the IAM
+ // GetVariable permission will be returned, along with their values.
+ bool return_values = 5;
+}
+
+// Response for the `ListVariables()` method.
+message ListVariablesResponse {
+ // A list of variables and their values. The order of returned variable
+ // objects is arbitrary.
+ repeated Variable variables = 1;
+
+ // This token allows you to get the next page of results for list requests.
+ // If the number of results is larger than `pageSize`, use the `nextPageToken`
+ // as a value for the query parameter `pageToken` in the next list request.
+ // Subsequent list requests will have their own `nextPageToken` to continue
+ // paging through the results.
+ string next_page_token = 2;
+}
+
+// Request for the `WatchVariable()` method.
+message WatchVariableRequest {
+ // The name of the variable to watch, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`
+ string name = 1;
+
+ // If specified, checks the current timestamp of the variable, and if the
+ // current timestamp is newer than the `newerThan` timestamp, the method
+ // returns immediately.
+ //
+ // If not specified, or if the variable has an older timestamp, the watcher
+ // waits for the value to change before returning.
+ google.protobuf.Timestamp newer_than = 4;
+}
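+
+// A minimal, illustrative example of a `WatchVariable()` request body in
+// proto3 JSON form, using hypothetical names. The call returns once the
+// variable changes after the given `newerThan` timestamp, or when the watch
+// times out:
+// <pre>
+// {
+//   "name": "projects/my-project/configs/my-config/variables/ports/serving_port",
+//   "newerThan": "2017-05-30T15:49:37Z"
+// }
+// </pre>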
+
+// Request for the `GetVariable()` method.
+message GetVariableRequest {
+ // The name of the variable to return, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`
+ string name = 1;
+}
+
+// Request for the `CreateVariable()` method.
+message CreateVariableRequest {
+ // The path to the RuntimeConfig resource that this variable should belong to.
+ // The configuration must exist beforehand; the path must be in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`
+ string parent = 1;
+
+ // The variable to create.
+ Variable variable = 2;
+
+ // An optional but recommended unique `request_id`. If the server
+ // receives two `create()` requests with the same
+ // `request_id`, then the second request will be ignored and the
+ // first resource created and stored in the backend is returned.
+ // Empty `request_id` fields are ignored.
+ //
+ // It is the responsibility of the client to ensure uniqueness of the
+ // `request_id` strings.
+ //
+ // `request_id` strings are limited to 64 characters.
+ string request_id = 3;
+}
+
+// Request for the `UpdateVariable()` method.
+message UpdateVariableRequest {
+ // The name of the variable to update, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`
+ string name = 1;
+
+ // The variable to update.
+ Variable variable = 2;
+}
+
+// Request for the `DeleteVariable()` method.
+message DeleteVariableRequest {
+ // The name of the variable to delete, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`
+ string name = 1;
+
+ // Set to `true` to recursively delete multiple variables with the same
+ // prefix.
+ bool recursive = 2;
+}
+
+// Request for the `ListWaiters()` method.
+message ListWaitersRequest {
+ // The path to the configuration for which you want to get a list of waiters.
+ // The configuration must exist beforehand; the path must be in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`
+ string parent = 1;
+
+ // Specifies the number of results to return per page. If there are fewer
+ // elements than the specified number, returns all elements.
+ int32 page_size = 2;
+
+ // Specifies a page token to use. Set `pageToken` to a `nextPageToken`
+ // returned by a previous list request to get the next page of results.
+ string page_token = 3;
+}
+
+// Response for the `ListWaiters()` method.
+// Order of returned waiter objects is arbitrary.
+message ListWaitersResponse {
+ // Found waiters in the project.
+ repeated Waiter waiters = 1;
+
+ // This token allows you to get the next page of results for list requests.
+ // If the number of results is larger than `pageSize`, use the `nextPageToken`
+ // as a value for the query parameter `pageToken` in the next list request.
+ // Subsequent list requests will have their own `nextPageToken` to continue
+ // paging through the results.
+ string next_page_token = 2;
+}
+
+// Request for the `GetWaiter()` method.
+message GetWaiterRequest {
+ // The fully-qualified name of the Waiter resource object to retrieve, in the
+ // format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`
+ string name = 1;
+}
+
+// Request message for `CreateWaiter()` method.
+message CreateWaiterRequest {
+ // The path to the configuration that will own the waiter.
+ // The configuration must exist beforehand; the path must be in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`.
+ string parent = 1;
+
+ // The Waiter resource to create.
+ Waiter waiter = 2;
+
+ // An optional but recommended unique `request_id`. If the server
+ // receives two `create()` requests with the same
+ // `request_id`, then the second request will be ignored and the
+ // first resource created and stored in the backend is returned.
+ // Empty `request_id` fields are ignored.
+ //
+ // It is the responsibility of the client to ensure uniqueness of the
+ // `request_id` strings.
+ //
+ // `request_id` strings are limited to 64 characters.
+ string request_id = 3;
+}
+
+// Request for the `DeleteWaiter()` method.
+message DeleteWaiterRequest {
+ // The Waiter resource to delete, in the format:
+ //
+ // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`
+ string name = 1;
+}
diff --git a/third_party/googleapis/google/cloud/speech/README.md b/third_party/googleapis/google/cloud/speech/README.md
new file mode 100644
index 0000000000..5a2622cc64
--- /dev/null
+++ b/third_party/googleapis/google/cloud/speech/README.md
@@ -0,0 +1,3 @@
+# Introduction
+
+The Google Cloud Speech API provides speech recognition as a service.
diff --git a/third_party/googleapis/google/cloud/speech/cloud_speech_v1.yaml b/third_party/googleapis/google/cloud/speech/cloud_speech_v1.yaml
new file mode 100644
index 0000000000..92b2314168
--- /dev/null
+++ b/third_party/googleapis/google/cloud/speech/cloud_speech_v1.yaml
@@ -0,0 +1,20 @@
+# Google Cloud Speech API service configuration
+
+type: google.api.Service
+config_version: 3
+name: speech.googleapis.com
+
+title: Google Cloud Speech API
+
+documentation:
+ summary:
+ Google Cloud Speech API.
+
+apis:
+- name: google.cloud.speech.v1.Speech
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/cloud/speech/cloud_speech_v1beta1.yaml b/third_party/googleapis/google/cloud/speech/cloud_speech_v1beta1.yaml
new file mode 100644
index 0000000000..c64a81aaa8
--- /dev/null
+++ b/third_party/googleapis/google/cloud/speech/cloud_speech_v1beta1.yaml
@@ -0,0 +1,20 @@
+# Google Cloud Speech API service configuration
+
+type: google.api.Service
+config_version: 3
+name: speech.googleapis.com
+
+title: Google Cloud Speech API
+
+documentation:
+ summary:
+ Google Cloud Speech API.
+
+apis:
+- name: google.cloud.speech.v1beta1.Speech
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/cloud/speech/v1/cloud_speech.proto b/third_party/googleapis/google/cloud/speech/v1/cloud_speech.proto
new file mode 100644
index 0000000000..c96426e86a
--- /dev/null
+++ b/third_party/googleapis/google/cloud/speech/v1/cloud_speech.proto
@@ -0,0 +1,407 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.speech.v1;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/speech/v1;speech";
+option java_multiple_files = true;
+option java_outer_classname = "SpeechProto";
+option java_package = "com.google.cloud.speech.v1";
+
+
+// Service that implements Google Cloud Speech API.
+service Speech {
+ // Performs synchronous speech recognition: receive results after all audio
+ // has been sent and processed.
+ rpc Recognize(RecognizeRequest) returns (RecognizeResponse) {
+ option (google.api.http) = { post: "/v1/speech:recognize" body: "*" };
+ }
+
+ // Performs asynchronous speech recognition: receive results via the
+ // google.longrunning.Operations interface. Returns either an
+ // `Operation.error` or an `Operation.response` which contains
+ // a `LongRunningRecognizeResponse` message.
+ rpc LongRunningRecognize(LongRunningRecognizeRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/speech:longrunningrecognize" body: "*" };
+ }
+
+ // Performs bidirectional streaming speech recognition: receive results while
+ // sending audio. This method is only available via the gRPC API (not REST).
+ rpc StreamingRecognize(stream StreamingRecognizeRequest) returns (stream StreamingRecognizeResponse);
+}
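+
+// Illustrative use of the `Recognize` REST mapping above (values are
+// placeholders; JSON field names assume the standard proto3 JSON mapping):
+//
+//   POST https://speech.googleapis.com/v1/speech:recognize
+//   {
+//     "config": {
+//       "encoding": "FLAC",
+//       "sampleRateHertz": 16000,
+//       "languageCode": "en-US"
+//     },
+//     "audio": { "uri": "gs://bucket_name/file_name.flac" }
+//   }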
+
+// The top-level message sent by the client for the `Recognize` method.
+message RecognizeRequest {
+ // *Required* Provides information to the recognizer that specifies how to
+ // process the request.
+ RecognitionConfig config = 1;
+
+ // *Required* The audio data to be recognized.
+ RecognitionAudio audio = 2;
+}
+
+// The top-level message sent by the client for the `LongRunningRecognize`
+// method.
+message LongRunningRecognizeRequest {
+ // *Required* Provides information to the recognizer that specifies how to
+ // process the request.
+ RecognitionConfig config = 1;
+
+ // *Required* The audio data to be recognized.
+ RecognitionAudio audio = 2;
+}
+
+// The top-level message sent by the client for the `StreamingRecognize` method.
+// Multiple `StreamingRecognizeRequest` messages are sent. The first message
+// must contain a `streaming_config` message and must not contain `audio` data.
+// All subsequent messages must contain `audio` data and must not contain a
+// `streaming_config` message.
+message StreamingRecognizeRequest {
+ oneof streaming_request {
+ // Provides information to the recognizer that specifies how to process the
+ // request. The first `StreamingRecognizeRequest` message must contain a
+ // `streaming_config` message.
+ StreamingRecognitionConfig streaming_config = 1;
+
+ // The audio data to be recognized. Sequential chunks of audio data are sent
+ // in sequential `StreamingRecognizeRequest` messages. The first
+ // `StreamingRecognizeRequest` message must not contain `audio_content` data
+ // and all subsequent `StreamingRecognizeRequest` messages must contain
+ // `audio_content` data. The audio bytes must be encoded as specified in
+ // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+ // pure binary representation (not base64). See
+ // [audio limits](https://cloud.google.com/speech/limits#content).
+ bytes audio_content = 2;
+ }
+}
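+
+// Illustrative request sequence (proto text format, placeholder values): the
+// first message carries only `streaming_config`, and every later message
+// carries only `audio_content`.
+//
+//   1. streaming_config {
+//        config { encoding: LINEAR16 sample_rate_hertz: 16000 language_code: "en-US" }
+//        interim_results: true
+//      }
+//   2. audio_content: "<first chunk of LINEAR16 bytes>"
+//   3. audio_content: "<next chunk of LINEAR16 bytes>"
+//   ...and so on until the client half-closes the stream.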
+
+// Provides information to the recognizer that specifies how to process the
+// request.
+message StreamingRecognitionConfig {
+ // *Required* Provides information to the recognizer that specifies how to
+ // process the request.
+ RecognitionConfig config = 1;
+
+ // *Optional* If `false` or omitted, the recognizer will perform continuous
+ // recognition (continuing to wait for and process audio even if the user
+ // pauses speaking) until the client closes the input stream (gRPC API) or
+ // until the maximum time limit has been reached. May return multiple
+ // `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
+ //
+ // If `true`, the recognizer will detect a single spoken utterance. When it
+ // detects that the user has paused or stopped speaking, it will return an
+ // `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
+ // more than one `StreamingRecognitionResult` with the `is_final` flag set to
+ // `true`.
+ bool single_utterance = 2;
+
+ // *Optional* If `true`, interim results (tentative hypotheses) may be
+ // returned as they become available (these interim results are indicated with
+ // the `is_final=false` flag).
+ // If `false` or omitted, only `is_final=true` result(s) are returned.
+ bool interim_results = 3;
+}
+
+// Provides information to the recognizer that specifies how to process the
+// request.
+message RecognitionConfig {
+ // Audio encoding of the data sent in the audio message. All encodings support
+ // only 1 channel (mono) audio. Only `FLAC` includes a header that describes
+ // the bytes of audio that follow the header. The other encodings are raw
+ // audio bytes with no header.
+ //
+ // For best results, the audio source should be captured and transmitted using
+ // a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be
+ // reduced if lossy codecs, which include the other codecs listed in
+ // this section, are used to capture or transmit the audio, particularly if
+ // background noise is present.
+ enum AudioEncoding {
+ // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
+ ENCODING_UNSPECIFIED = 0;
+
+ // Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ LINEAR16 = 1;
+
+ // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
+ // Codec) is the recommended encoding because it is
+ // lossless--therefore recognition is not compromised--and
+ // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
+    // encoding supports 16-bit and 24-bit samples; however, not all fields in
+ // `STREAMINFO` are supported.
+ FLAC = 2;
+
+ // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
+ MULAW = 3;
+
+ // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
+ AMR = 4;
+
+ // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
+ AMR_WB = 5;
+
+ // Opus encoded audio frames in Ogg container
+ // ([OggOpus](https://wiki.xiph.org/OggOpus)).
+ // `sample_rate_hertz` must be 16000.
+ OGG_OPUS = 6;
+
+ // Although the use of lossy encodings is not recommended, if a very low
+ // bitrate encoding is required, `OGG_OPUS` is highly preferred over
+ // Speex encoding. The [Speex](https://speex.org/) encoding supported by
+ // Cloud Speech API has a header byte in each block, as in MIME type
+ // `audio/x-speex-with-header-byte`.
+ // It is a variant of the RTP Speex encoding defined in
+ // [RFC 5574](https://tools.ietf.org/html/rfc5574).
+ // The stream is a sequence of blocks, one block per RTP packet. Each block
+ // starts with a byte containing the length of the block, in bytes, followed
+ // by one or more frames of Speex data, padded to an integral number of
+ // bytes (octets) as specified in RFC 5574. In other words, each RTP header
+ // is replaced with a single byte containing the block length. Only Speex
+ // wideband is supported. `sample_rate_hertz` must be 16000.
+ SPEEX_WITH_HEADER_BYTE = 7;
+ }
+
+ // *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
+ AudioEncoding encoding = 1;
+
+ // *Required* Sample rate in Hertz of the audio data sent in all
+ // `RecognitionAudio` messages. Valid values are: 8000-48000.
+ // 16000 is optimal. For best results, set the sampling rate of the audio
+ // source to 16000 Hz. If that's not possible, use the native sample rate of
+ // the audio source (instead of re-sampling).
+ int32 sample_rate_hertz = 2;
+
+ // *Required* The language of the supplied audio as a
+ // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+ // Example: "en-US".
+ // See [Language Support](https://cloud.google.com/speech/docs/languages)
+ // for a list of the currently supported language codes.
+ string language_code = 3;
+
+ // *Optional* Maximum number of recognition hypotheses to be returned.
+ // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+ // within each `SpeechRecognitionResult`.
+ // The server may return fewer than `max_alternatives`.
+ // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+  // one. If omitted, a maximum of one is returned.
+ int32 max_alternatives = 4;
+
+ // *Optional* If set to `true`, the server will attempt to filter out
+ // profanities, replacing all but the initial character in each filtered word
+ // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ // won't be filtered out.
+ bool profanity_filter = 5;
+
+ // *Optional* A means to provide context to assist the speech recognition.
+ repeated SpeechContext speech_contexts = 6;
+}
+
+// Provides "hints" to the speech recognizer to favor specific words and phrases
+// in the results.
+message SpeechContext {
+ // *Optional* A list of strings containing words and phrases "hints" so that
+ // the speech recognition is more likely to recognize them. This can be used
+ // to improve the accuracy for specific words and phrases, for example, if
+ // specific commands are typically spoken by the user. This can also be used
+ // to add additional words to the vocabulary of the recognizer. See
+ // [usage limits](https://cloud.google.com/speech/limits#content).
+ repeated string phrases = 1;
+}
+
+// Contains audio data in the encoding specified in the `RecognitionConfig`.
+// Either `content` or `uri` must be supplied. Supplying both or neither
+// returns [google.rpc.Code.INVALID_ARGUMENT][]. See
+// [audio limits](https://cloud.google.com/speech/limits#content).
+message RecognitionAudio {
+ oneof audio_source {
+ // The audio data bytes encoded as specified in
+ // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+ // pure binary representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // URI that points to a file that contains audio data bytes as specified in
+ // `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
+ // supported, which must be specified in the following format:
+ // `gs://bucket_name/object_name` (other URI formats return
+ // [google.rpc.Code.INVALID_ARGUMENT][]). For more information, see
+ // [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ string uri = 2;
+ }
+}
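+
+// Illustrative values (exactly one of the `audio_source` fields may be set):
+//
+//   uri: "gs://bucket_name/object_name"
+// or, for inline audio,
+//   content: "<raw audio bytes; base64-encoded in the JSON representation>"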
+
+// The only message returned to the client by the `Recognize` method. It
+// contains the result as zero or more sequential `SpeechRecognitionResult`
+// messages.
+message RecognizeResponse {
+ // *Output-only* Sequential list of transcription results corresponding to
+ // sequential portions of audio.
+ repeated SpeechRecognitionResult results = 2;
+}
+
+// The only message returned to the client by the `LongRunningRecognize` method.
+// It contains the result as zero or more sequential `SpeechRecognitionResult`
+// messages. It is included in the `result.response` field of the `Operation`
+// returned by the `GetOperation` call of the `google::longrunning::Operations`
+// service.
+message LongRunningRecognizeResponse {
+ // *Output-only* Sequential list of transcription results corresponding to
+ // sequential portions of audio.
+ repeated SpeechRecognitionResult results = 2;
+}
+
+// Describes the progress of a long-running `LongRunningRecognize` call. It is
+// included in the `metadata` field of the `Operation` returned by the
+// `GetOperation` call of the `google::longrunning::Operations` service.
+message LongRunningRecognizeMetadata {
+ // Approximate percentage of audio processed thus far. Guaranteed to be 100
+ // when the audio is fully processed and the results are available.
+ int32 progress_percent = 1;
+
+ // Time when the request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // Time of the most recent processing update.
+ google.protobuf.Timestamp last_update_time = 3;
+}
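+
+// Illustrative polling flow, following the comments above: poll
+// `google.longrunning.Operations.GetOperation` until the operation is done.
+// While it runs, the `metadata` field unpacks to a
+// `LongRunningRecognizeMetadata` (e.g. `progress_percent: 50`); on success,
+// the `response` field unpacks to a `LongRunningRecognizeResponse` containing
+// the final `results`.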
+
+// `StreamingRecognizeResponse` is the only message returned to the client by
+// `StreamingRecognize`. A series of one or more `StreamingRecognizeResponse`
+// messages are streamed back to the client.
+//
+// Here's an example of a series of eight `StreamingRecognizeResponse`s that might
+// be returned while processing audio:
+//
+// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
+//
+// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
+//
+// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
+// results { alternatives { transcript: " or not to be" } stability: 0.01 }
+//
+// 4. results { alternatives { transcript: "to be or not to be"
+// confidence: 0.92 }
+// alternatives { transcript: "to bee or not to bee" }
+// is_final: true }
+//
+// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
+//
+// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
+// results { alternatives { transcript: " the question" } stability: 0.01 }
+//
+// 7. speech_event_type: END_OF_SINGLE_UTTERANCE
+//
+// 8. results { alternatives { transcript: " that is the question"
+// confidence: 0.98 }
+// alternatives { transcript: " that was the question" }
+// is_final: true }
+//
+// Notes:
+//
+// - Only two of the above responses #4 and #8 contain final results; they are
+// indicated by `is_final: true`. Concatenating these together generates the
+// full transcript: "to be or not to be that is the question".
+//
+// - The others contain interim `results`. #3 and #6 contain two interim
+// `results`: the first portion has a high stability and is less likely to
+// change; the second portion has a low stability and is very likely to
+// change. A UI designer might choose to show only high stability `results`.
+//
+// - The specific `stability` and `confidence` values shown above are only for
+// illustrative purposes. Actual values may vary.
+//
+// - In each response, only one of these fields will be set:
+// `error`,
+// `speech_event_type`, or
+// one or more (repeated) `results`.
+message StreamingRecognizeResponse {
+ // Indicates the type of speech event.
+ enum SpeechEventType {
+ // No speech event specified.
+ SPEECH_EVENT_UNSPECIFIED = 0;
+
+ // This event indicates that the server has detected the end of the user's
+ // speech utterance and expects no additional speech. Therefore, the server
+ // will not process additional audio (although it may subsequently return
+ // additional results). The client should stop sending additional audio
+ // data, half-close the gRPC connection, and wait for any additional results
+ // until the server closes the gRPC connection. This event is only sent if
+ // `single_utterance` was set to `true`, and is not used otherwise.
+ END_OF_SINGLE_UTTERANCE = 1;
+ }
+
+ // *Output-only* If set, returns a [google.rpc.Status][] message that
+ // specifies the error for the operation.
+ google.rpc.Status error = 1;
+
+ // *Output-only* This repeated list contains zero or more results that
+ // correspond to consecutive portions of the audio currently being processed.
+ // It contains zero or one `is_final=true` result (the newly settled portion),
+ // followed by zero or more `is_final=false` results.
+ repeated StreamingRecognitionResult results = 2;
+
+ // *Output-only* Indicates the type of speech event.
+ SpeechEventType speech_event_type = 4;
+}
+
+// A streaming speech recognition result corresponding to a portion of the audio
+// that is currently being processed.
+message StreamingRecognitionResult {
+ // *Output-only* May contain one or more recognition hypotheses (up to the
+ // maximum specified in `max_alternatives`).
+ repeated SpeechRecognitionAlternative alternatives = 1;
+
+ // *Output-only* If `false`, this `StreamingRecognitionResult` represents an
+ // interim result that may change. If `true`, this is the final time the
+  // speech service will return this particular `StreamingRecognitionResult`;
+ // the recognizer will not return any further hypotheses for this portion of
+ // the transcript and corresponding audio.
+ bool is_final = 2;
+
+ // *Output-only* An estimate of the likelihood that the recognizer will not
+ // change its guess about this interim result. Values range from 0.0
+ // (completely unstable) to 1.0 (completely stable).
+ // This field is only provided for interim results (`is_final=false`).
+ // The default of 0.0 is a sentinel value indicating `stability` was not set.
+ float stability = 3;
+}
+
+// A speech recognition result corresponding to a portion of the audio.
+message SpeechRecognitionResult {
+ // *Output-only* May contain one or more recognition hypotheses (up to the
+ // maximum specified in `max_alternatives`).
+ repeated SpeechRecognitionAlternative alternatives = 1;
+}
+
+// Alternative hypotheses (a.k.a. n-best list).
+message SpeechRecognitionAlternative {
+ // *Output-only* Transcript text representing the words that the user spoke.
+ string transcript = 1;
+
+ // *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
+ // indicates an estimated greater likelihood that the recognized words are
+ // correct. This field is typically provided only for the top hypothesis, and
+ // only for `is_final=true` results. Clients should not rely on the
+ // `confidence` field as it is not guaranteed to be accurate, or even set, in
+ // any of the results.
+ // The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ float confidence = 2;
+}
diff --git a/third_party/googleapis/google/cloud/speech/v1/cloud_speech_gapic.yaml b/third_party/googleapis/google/cloud/speech/v1/cloud_speech_gapic.yaml
new file mode 100644
index 0000000000..debb8f93ba
--- /dev/null
+++ b/third_party/googleapis/google/cloud/speech/v1/cloud_speech_gapic.yaml
@@ -0,0 +1,86 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.speech.spi.v1
+ python:
+ package_name: google.cloud.gapic.speech.v1
+ go:
+ package_name: cloud.google.com/go/speech/apiv1
+ csharp:
+ package_name: Google.Cloud.Speech.V1
+ ruby:
+ package_name: Google::Cloud::Speech::V1
+ php:
+ package_name: Google\Cloud\Speech\V1
+ nodejs:
+ package_name: speech.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.cloud.speech.v1.Speech
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 190000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 190000
+ total_timeout_millis: 600000
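+  # Illustrative schedule under the `default` params above: retry delays of
+  # roughly 100 ms, 130 ms, 169 ms, ... (each multiplied by 1.3), capped at
+  # 60000 ms per delay, until the 600000 ms total timeout is exhausted.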
+ methods:
+ - name: Recognize
+ flattening:
+ groups:
+ - parameters:
+ - config
+ - audio
+ required_fields:
+ - config
+ - audio
+ sample_code_init_fields:
+ - config.encoding=FLAC
+ - config.sample_rate_hertz=44100
+ - config.language_code="en-US"
+ - audio.uri=gs://bucket_name/file_name.flac
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 190000
+ - name: LongRunningRecognize
+ flattening:
+ groups:
+ - parameters:
+ - config
+ - audio
+ required_fields:
+ - config
+ - audio
+ sample_code_init_fields:
+ - config.encoding=FLAC
+ - config.sample_rate_hertz=44100
+ - config.language_code="en-US"
+ - audio.uri=gs://bucket_name/file_name.flac
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ long_running:
+ return_type: google.cloud.speech.v1.LongRunningRecognizeResponse
+ metadata_type: google.cloud.speech.v1.LongRunningRecognizeMetadata
+ polling_interval_millis: 20000
+ - name: StreamingRecognize
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 190000
diff --git a/third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech.proto b/third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech.proto
new file mode 100644
index 0000000000..f92c6143af
--- /dev/null
+++ b/third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech.proto
@@ -0,0 +1,419 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.speech.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/speech/v1beta1;speech";
+option java_multiple_files = true;
+option java_outer_classname = "SpeechProto";
+option java_package = "com.google.cloud.speech.v1beta1";
+
+
+// Service that implements Google Cloud Speech API.
+service Speech {
+ // Performs synchronous speech recognition: receive results after all audio
+ // has been sent and processed.
+ rpc SyncRecognize(SyncRecognizeRequest) returns (SyncRecognizeResponse) {
+ option (google.api.http) = { post: "/v1beta1/speech:syncrecognize" body: "*" };
+ }
+
+ // Performs asynchronous speech recognition: receive results via the
+ // [google.longrunning.Operations]
+ // (/speech/reference/rest/v1beta1/operations#Operation)
+ // interface. Returns either an
+ // `Operation.error` or an `Operation.response` which contains
+ // an `AsyncRecognizeResponse` message.
+ rpc AsyncRecognize(AsyncRecognizeRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1beta1/speech:asyncrecognize" body: "*" };
+ }
+
+ // Performs bidirectional streaming speech recognition: receive results while
+ // sending audio. This method is only available via the gRPC API (not REST).
+ rpc StreamingRecognize(stream StreamingRecognizeRequest) returns (stream StreamingRecognizeResponse);
+}
+
+// The top-level message sent by the client for the `SyncRecognize` method.
+message SyncRecognizeRequest {
+ // *Required* Provides information to the recognizer that specifies how to
+ // process the request.
+ RecognitionConfig config = 1;
+
+ // *Required* The audio data to be recognized.
+ RecognitionAudio audio = 2;
+}
+
+// The top-level message sent by the client for the `AsyncRecognize` method.
+message AsyncRecognizeRequest {
+ // *Required* Provides information to the recognizer that specifies how to
+ // process the request.
+ RecognitionConfig config = 1;
+
+ // *Required* The audio data to be recognized.
+ RecognitionAudio audio = 2;
+}
+
+// The top-level message sent by the client for the `StreamingRecognize` method.
+// Multiple `StreamingRecognizeRequest` messages are sent. The first message
+// must contain a `streaming_config` message and must not contain `audio` data.
+// All subsequent messages must contain `audio` data and must not contain a
+// `streaming_config` message.
+message StreamingRecognizeRequest {
+ oneof streaming_request {
+ // Provides information to the recognizer that specifies how to process the
+ // request. The first `StreamingRecognizeRequest` message must contain a
+ // `streaming_config` message.
+ StreamingRecognitionConfig streaming_config = 1;
+
+ // The audio data to be recognized. Sequential chunks of audio data are sent
+ // in sequential `StreamingRecognizeRequest` messages. The first
+ // `StreamingRecognizeRequest` message must not contain `audio_content` data
+ // and all subsequent `StreamingRecognizeRequest` messages must contain
+ // `audio_content` data. The audio bytes must be encoded as specified in
+ // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+ // pure binary representation (not base64). See
+ // [audio limits](https://cloud.google.com/speech/limits#content).
+ bytes audio_content = 2;
+ }
+}
+
+// Provides information to the recognizer that specifies how to process the
+// request.
+message StreamingRecognitionConfig {
+ // *Required* Provides information to the recognizer that specifies how to
+ // process the request.
+ RecognitionConfig config = 1;
+
+ // *Optional* If `false` or omitted, the recognizer will perform continuous
+ // recognition (continuing to wait for and process audio even if the user
+ // pauses speaking) until the client closes the input stream (gRPC API) or
+ // until the maximum time limit has been reached. May return multiple
+ // `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
+ //
+ // If `true`, the recognizer will detect a single spoken utterance. When it
+ // detects that the user has paused or stopped speaking, it will return an
+ // `END_OF_UTTERANCE` event and cease recognition. It will return no more than
+ // one `StreamingRecognitionResult` with the `is_final` flag set to `true`.
+ bool single_utterance = 2;
+
+ // *Optional* If `true`, interim results (tentative hypotheses) may be
+ // returned as they become available (these interim results are indicated with
+ // the `is_final=false` flag).
+ // If `false` or omitted, only `is_final=true` result(s) are returned.
+ bool interim_results = 3;
+}
+
+// Provides information to the recognizer that specifies how to process the
+// request.
+message RecognitionConfig {
+ // Audio encoding of the data sent in the audio message. All encodings support
+ // only 1 channel (mono) audio. Only `FLAC` includes a header that describes
+ // the bytes of audio that follow the header. The other encodings are raw
+ // audio bytes with no header.
+ //
+ // For best results, the audio source should be captured and transmitted using
+ // a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be
+ // reduced if lossy codecs (such as AMR, AMR_WB and MULAW) are used to capture
+ // or transmit the audio, particularly if background noise is present.
+ enum AudioEncoding {
+ // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
+ ENCODING_UNSPECIFIED = 0;
+
+ // Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ // This is the only encoding that may be used by `AsyncRecognize`.
+ LINEAR16 = 1;
+
+ // This is the recommended encoding for `SyncRecognize` and
+ // `StreamingRecognize` because it uses lossless compression; therefore
+ // recognition accuracy is not compromised by a lossy codec.
+ //
+ // The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
+ // http://flac.sourceforge.net/documentation.html.
+ // 16-bit and 24-bit samples are supported.
+ // Not all fields in STREAMINFO are supported.
+ FLAC = 2;
+
+ // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
+ MULAW = 3;
+
+ // Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz.
+ AMR = 4;
+
+ // Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz.
+ AMR_WB = 5;
+ }
+
+ // *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
+ AudioEncoding encoding = 1;
+
+ // *Required* Sample rate in Hertz of the audio data sent in all
+ // `RecognitionAudio` messages. Valid values are: 8000-48000.
+ // 16000 is optimal. For best results, set the sampling rate of the audio
+ // source to 16000 Hz. If that's not possible, use the native sample rate of
+ // the audio source (instead of re-sampling).
+ int32 sample_rate = 2;
+
+ // *Optional* The language of the supplied audio as a BCP-47 language tag.
+  // Example: "en-GB" (see https://www.rfc-editor.org/rfc/bcp/bcp47.txt).
+ // If omitted, defaults to "en-US". See
+ // [Language Support](https://cloud.google.com/speech/docs/languages)
+ // for a list of the currently supported language codes.
+ string language_code = 3;
+
+ // *Optional* Maximum number of recognition hypotheses to be returned.
+ // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+ // within each `SpeechRecognitionResult`.
+ // The server may return fewer than `max_alternatives`.
+ // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+  // one. If omitted, a maximum of one is returned.
+ int32 max_alternatives = 4;
+
+ // *Optional* If set to `true`, the server will attempt to filter out
+ // profanities, replacing all but the initial character in each filtered word
+ // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ // won't be filtered out.
+ bool profanity_filter = 5;
+
+ // *Optional* A means to provide context to assist the speech recognition.
+ SpeechContext speech_context = 6;
+}
+
+// Provides "hints" to the speech recognizer to favor specific words and phrases
+// in the results.
+message SpeechContext {
+ // *Optional* A list of strings containing words and phrases "hints" so that
+ // the speech recognition is more likely to recognize them. This can be used
+ // to improve the accuracy for specific words and phrases, for example, if
+ // specific commands are typically spoken by the user. This can also be used
+ // to add additional words to the vocabulary of the recognizer. See
+ // [usage limits](https://cloud.google.com/speech/limits#content).
+ repeated string phrases = 1;
+}
+
+// Contains audio data in the encoding specified in the `RecognitionConfig`.
+// Either `content` or `uri` must be supplied. Supplying both or neither
+// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
+// [audio limits](https://cloud.google.com/speech/limits#content).
+message RecognitionAudio {
+ oneof audio_source {
+ // The audio data bytes encoded as specified in
+ // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+ // pure binary representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // URI that points to a file that contains audio data bytes as specified in
+ // `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
+ // supported, which must be specified in the following format:
+ // `gs://bucket_name/object_name` (other URI formats return
+ // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ // [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ string uri = 2;
+ }
+}
+
+// The only message returned to the client by `SyncRecognize`. It
+// contains the result as zero or more sequential `SpeechRecognitionResult`
+// messages.
+message SyncRecognizeResponse {
+ // *Output-only* Sequential list of transcription results corresponding to
+ // sequential portions of audio.
+ repeated SpeechRecognitionResult results = 2;
+}
+
+// The only message returned to the client by `AsyncRecognize`. It contains the
+// result as zero or more sequential `SpeechRecognitionResult` messages. It is
+// included in the `result.response` field of the `Operation` returned by the
+// `GetOperation` call of the `google::longrunning::Operations` service.
+message AsyncRecognizeResponse {
+ // *Output-only* Sequential list of transcription results corresponding to
+ // sequential portions of audio.
+ repeated SpeechRecognitionResult results = 2;
+}
+
+// Describes the progress of a long-running `AsyncRecognize` call. It is
+// included in the `metadata` field of the `Operation` returned by the
+// `GetOperation` call of the `google::longrunning::Operations` service.
+message AsyncRecognizeMetadata {
+ // Approximate percentage of audio processed thus far. Guaranteed to be 100
+ // when the audio is fully processed and the results are available.
+ int32 progress_percent = 1;
+
+ // Time when the request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // Time of the most recent processing update.
+ google.protobuf.Timestamp last_update_time = 3;
+}
+
+// `StreamingRecognizeResponse` is the only message returned to the client by
+// `StreamingRecognize`. A series of one or more `StreamingRecognizeResponse`
+// messages are streamed back to the client.
+//
+// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
+// be returned while processing audio:
+//
+// 1. endpointer_type: START_OF_SPEECH
+//
+// 2. results { alternatives { transcript: "tube" } stability: 0.01 }
+// result_index: 0
+//
+// 3. results { alternatives { transcript: "to be a" } stability: 0.01 }
+// result_index: 0
+//
+// 4. results { alternatives { transcript: "to be" } stability: 0.9 }
+// results { alternatives { transcript: " or not to be" } stability: 0.01 }
+// result_index: 0
+//
+// 5. results { alternatives { transcript: "to be or not to be"
+// confidence: 0.92 }
+// alternatives { transcript: "to bee or not to bee" }
+// is_final: true }
+// result_index: 0
+//
+// 6. results { alternatives { transcript: " that's" } stability: 0.01 }
+// result_index: 1
+//
+// 7. results { alternatives { transcript: " that is" } stability: 0.9 }
+// results { alternatives { transcript: " the question" } stability: 0.01 }
+// result_index: 1
+//
+// 8. endpointer_type: END_OF_SPEECH
+//
+// 9. results { alternatives { transcript: " that is the question"
+// confidence: 0.98 }
+// alternatives { transcript: " that was the question" }
+// is_final: true }
+// result_index: 1
+//
+// 10. endpointer_type: END_OF_AUDIO
+//
+// Notes:
+//
+// - Only two of the above responses, #5 and #9, contain final results; they are
+// indicated by `is_final: true`. Concatenating these together generates the
+// full transcript: "to be or not to be that is the question".
+//
+// - The others contain interim `results`. #4 and #7 contain two interim
+//   `results`: the first portion has a high stability and is less likely to
+//   change; the second portion has a low stability and is very likely to
+// change. A UI designer might choose to show only high stability `results`.
+//
+// - The specific `stability` and `confidence` values shown above are only for
+// illustrative purposes. Actual values may vary.
+//
+// - The `result_index` indicates the portion of audio that has had final
+// results returned, and is no longer being processed. For example, the
+// `results` in #6 and later correspond to the portion of audio after
+// "to be or not to be".
+message StreamingRecognizeResponse {
+ // Indicates the type of endpointer event.
+ enum EndpointerType {
+ // No endpointer event specified.
+ ENDPOINTER_EVENT_UNSPECIFIED = 0;
+
+ // Speech has been detected in the audio stream, and the service is
+ // beginning to process it.
+ START_OF_SPEECH = 1;
+
+ // Speech has ceased to be detected in the audio stream. (For example, the
+ // user may have paused after speaking.) If `single_utterance` is `false`,
+ // the service will continue to process audio, and if subsequent speech is
+ // detected, will send another START_OF_SPEECH event.
+ END_OF_SPEECH = 2;
+
+ // This event is sent after the client has half-closed the input stream gRPC
+ // connection and the server has received all of the audio. (The server may
+ // still be processing the audio and may subsequently return additional
+ // results.)
+ END_OF_AUDIO = 3;
+
+ // This event is only sent when `single_utterance` is `true`. It indicates
+ // that the server has detected the end of the user's speech utterance and
+ // expects no additional speech. Therefore, the server will not process
+ // additional audio (although it may subsequently return additional
+ // results). The client should stop sending additional audio data,
+ // half-close the gRPC connection, and wait for any additional results
+ // until the server closes the gRPC connection.
+ END_OF_UTTERANCE = 4;
+ }
+
+ // *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that
+ // specifies the error for the operation.
+ google.rpc.Status error = 1;
+
+ // *Output-only* This repeated list contains zero or more results that
+ // correspond to consecutive portions of the audio currently being processed.
+ // It contains zero or one `is_final=true` result (the newly settled portion),
+ // followed by zero or more `is_final=false` results.
+ repeated StreamingRecognitionResult results = 2;
+
+ // *Output-only* Indicates the lowest index in the `results` array that has
+ // changed. The repeated `StreamingRecognitionResult` results overwrite past
+ // results at this index and higher.
+ int32 result_index = 3;
+
+ // *Output-only* Indicates the type of endpointer event.
+ EndpointerType endpointer_type = 4;
+}
+
+// A streaming speech recognition result corresponding to a portion of the audio
+// that is currently being processed.
+message StreamingRecognitionResult {
+ // *Output-only* May contain one or more recognition hypotheses (up to the
+ // maximum specified in `max_alternatives`).
+ repeated SpeechRecognitionAlternative alternatives = 1;
+
+ // *Output-only* If `false`, this `StreamingRecognitionResult` represents an
+ // interim result that may change. If `true`, this is the final time the
+  // speech service will return this particular `StreamingRecognitionResult`;
+ // the recognizer will not return any further hypotheses for this portion of
+ // the transcript and corresponding audio.
+ bool is_final = 2;
+
+ // *Output-only* An estimate of the likelihood that the recognizer will not
+ // change its guess about this interim result. Values range from 0.0
+ // (completely unstable) to 1.0 (completely stable).
+ // This field is only provided for interim results (`is_final=false`).
+ // The default of 0.0 is a sentinel value indicating `stability` was not set.
+ float stability = 3;
+}
+
+// A speech recognition result corresponding to a portion of the audio.
+message SpeechRecognitionResult {
+ // *Output-only* May contain one or more recognition hypotheses (up to the
+ // maximum specified in `max_alternatives`).
+ repeated SpeechRecognitionAlternative alternatives = 1;
+}
+
+// Alternative hypotheses (a.k.a. n-best list).
+message SpeechRecognitionAlternative {
+ // *Output-only* Transcript text representing the words that the user spoke.
+ string transcript = 1;
+
+ // *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
+ // indicates an estimated greater likelihood that the recognized words are
+ // correct. This field is typically provided only for the top hypothesis, and
+ // only for `is_final=true` results. Clients should not rely on the
+ // `confidence` field as it is not guaranteed to be accurate, or even set, in
+ // any of the results.
+ // The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ float confidence = 2;
+}
diff --git a/third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech_gapic.yaml b/third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech_gapic.yaml
new file mode 100644
index 0000000000..8f72004f09
--- /dev/null
+++ b/third_party/googleapis/google/cloud/speech/v1beta1/cloud_speech_gapic.yaml
@@ -0,0 +1,89 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.speech.spi.v1beta1
+ python:
+ package_name: google.cloud.gapic.speech.v1beta1
+ go:
+ package_name: cloud.google.com/go/speech/apiv1beta1
+ csharp:
+ package_name: Google.Cloud.Speech.V1Beta1
+ ruby:
+ package_name: Google::Cloud::Speech::V1beta1
+ php:
+ package_name: Google\Cloud\Speech\V1beta1
+ nodejs:
+ package_name: speech.v1beta1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.cloud.speech.v1beta1.Speech
+ smoke_test:
+ method: SyncRecognize
+ init_fields:
+ - config.language_code="en-US"
+ - config.sample_rate=44100
+ - config.encoding=FLAC
+ - audio.uri="gs://gapic-toolkit/hello.flac"
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 190000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 190000
+ total_timeout_millis: 600000
+ methods:
+ - name: SyncRecognize
+ flattening:
+ groups:
+ - parameters:
+ - config
+ - audio
+ required_fields:
+ - config
+ - audio
+ sample_code_init_fields:
+ - config.encoding=FLAC
+ - config.sample_rate=44100
+ - audio.uri=gs://bucket_name/file_name.flac
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: AsyncRecognize
+ flattening:
+ groups:
+ - parameters:
+ - config
+ - audio
+ required_fields:
+ - config
+ - audio
+ sample_code_init_fields:
+ - config.encoding=FLAC
+ - config.sample_rate=44100
+ - audio.uri=gs://bucket_name/file_name.flac
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ long_running:
+ return_type: google.cloud.speech.v1beta1.AsyncRecognizeResponse
+ metadata_type: google.cloud.speech.v1beta1.AsyncRecognizeMetadata
+ - name: StreamingRecognize
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 190000
diff --git a/third_party/googleapis/google/cloud/support/common.proto b/third_party/googleapis/google/cloud/support/common.proto
new file mode 100644
index 0000000000..374d69c1f0
--- /dev/null
+++ b/third_party/googleapis/google/cloud/support/common.proto
@@ -0,0 +1,334 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.support.common;
+
+import "google/api/annotations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/support/common;common";
+option java_outer_classname = "CloudSupportProto";
+option java_package = "com.google.cloud.support.common";
+
+
+// A Google Cloud Platform account that identifies support eligibility for a
+// Cloud resource. Currently the Cloud resource can only be an Organization
+// but this might change in the future.
+message SupportAccount {
+ // The current state of this SupportAccount.
+ enum State {
+ // Account is in an unknown state.
+ STATE_UNSPECIFIED = 0;
+
+ // Account is in an active state.
+ ACTIVE = 1;
+
+ // Account has been created but is being provisioned in support systems.
+ PENDING = 2;
+
+ // Account deletion has been requested by the user.
+ PENDING_DELETION = 3;
+ }
+
+ // Pricing model applicable to this support account.
+ enum PricingModel {
+ // This account is subscribed to an unknown pricing model.
+ PRICING_MODEL_UNKNOWN = 0;
+
+ // Package based pricing (Platinum, Gold, Silver, Bronze).
+ PACKAGES = 1;
+
+    // Support charges are calculated based on user seats, a.k.a. the
+    // "Pick Your Team" model.
+ USER_ROLES = 2;
+ }
+
+ // The resource name for a support account in format
+ // `supportAccounts/{account_id}`.
+ // Output only.
+ string name = 1;
+
+ // Identifier for this entity that gets persisted in storage system. The
+ // resource name is populated using this field in format
+ // `supportAccounts/{account_id}`.
+ string account_id = 2;
+
+ // The Cloud resource with which this support account is associated.
+ string cloud_resource = 3;
+
+ // A user friendly display name assigned to this support account.
+ string display_name = 4;
+
+ // Indicates the current state of an account.
+ State state = 5;
+
+ // Time when this account was created.
+ // Output only.
+ google.protobuf.Timestamp create_time = 6;
+
+ // The resource name of a billing account associated with this support
+ // account. For example, `billingAccounts/ABCDEF-012345-567890`.
+ string billing_account_name = 7;
+
+ string unify_account_id = 8;
+
+ // The PricingModel applicable to this support account.
+ PricingModel pricing_model = 9;
+}
+
+// A support case created by the user.
+message Case {
+ // The case priority with P0 being the most urgent and P4 the least.
+ enum Priority {
+ // Priority is undefined or has not been set yet.
+ PRIORITY_UNSPECIFIED = 0;
+
+ // Extreme impact on a production service - Service is hard down.
+ P0 = 1;
+
+ // Critical impact on a production service - Service is currently unusable.
+ P1 = 2;
+
+ // Severe impact on a production service - Service is usable but greatly
+ // impaired.
+ P2 = 3;
+
+ // Medium impact on a production service - Service is available, but
+ // moderately impaired.
+ P3 = 4;
+
+ // General questions or minor issues - Production service is fully
+ // available.
+ P4 = 5;
+ }
+
+ // The state of a case.
+ enum State {
+ // Case is in an unknown state.
+ STATE_UNSPECIFIED = 0;
+
+ // Case has been created but no one is assigned to work on it yet.
+ NEW = 1;
+
+ // Case has been assigned to a support agent.
+ ASSIGNED = 2;
+
+ // A support agent is currently investigating the case.
+ IN_PROGRESS_GOOGLE_SUPPORT = 3;
+
+ // Case has been forwarded to product team for further investigation.
+ IN_PROGRESS_GOOGLE_ENG = 4;
+
+ // Case is under investigation and relates to a known issue.
+ IN_PROGRESS_KNOWN_ISSUE = 5;
+
+ // Case is waiting for a response from the customer.
+ WAITING_FOR_CUSTOMER_RESPONSE = 6;
+
+ // A solution has been offered for the case but it isn't closed yet.
+ SOLUTION_OFFERED = 7;
+
+    // Case has been fully resolved and is in a closed state.
+ CLOSED = 8;
+ }
+
+ // The resource name for the Case in format
+ // `supportAccounts/{account_id}/cases/{case_id}`
+ string name = 1;
+
+ // The short summary of the issue reported in this case.
+ string display_name = 2;
+
+  // The broad description of the issue provided with the initial summary.
+ string description = 3;
+
+ // The product component for which this Case is reported.
+ string component = 4;
+
+ // The product subcomponent for which this Case is reported.
+ string subcomponent = 5;
+
+ // Timezone the client sending this request is in.
+ // It should be in a format IANA recognizes: https://www.iana.org/time-zone
+ // There is no additional validation done by the API.
+ string client_timezone = 6;
+
+ // The email addresses that can be copied to receive updates on this case.
+ // Users can specify a maximum of 10 email addresses.
+ repeated string cc_addresses = 7;
+
+ // The Google Cloud Platform project ID for which this case is created.
+ string project_id = 8;
+
+ // List of customer issues associated with this case.
+ repeated CustomerIssue issues = 10;
+
+ // The current priority of this case.
+ Priority priority = 11;
+
+ // The current state of this case.
+ State state = 12;
+
+ // Time when this case was created.
+ // Output only.
+ google.protobuf.Timestamp create_time = 13;
+
+ // Time when this case was last updated.
+ // Output only.
+ google.protobuf.Timestamp update_time = 14;
+
+ // Email address of user who created this case.
+ // Output only. It is inferred from credentials supplied during case creation.
+ string creator_email = 15;
+
+ // The issue category applicable to this case.
+ string category = 16;
+}
+
+// Reference to a Google internal ticket used for investigating a support case.
+// Not every support case will have an internal ticket associated with it.
+// A support case can have multiple tickets linked to it.
+message CustomerIssue {
+ // The status of a customer issue.
+ enum IssueState {
+ // Issue in an unknown state.
+ ISSUE_STATE_UNSPECIFIED = 0;
+
+ // Issue is currently open but the work on it has not been started.
+ OPEN = 1;
+
+ // Issue is currently being worked on.
+ IN_PROGRESS = 2;
+
+ // Issue is fixed.
+ FIXED = 3;
+
+ // Issue has been marked as invalid.
+ WONT_FIX = 4;
+
+ // Issue verified and in production.
+ VERIFIED = 5;
+ }
+
+ // Unique identifier for the internal issue.
+ // Output only.
+ string issue_id = 1;
+
+  // Represents the current status of the internal ticket.
+ // Output only.
+ IssueState state = 2;
+
+ // Time when the internal issue was created.
+ // Output only.
+ google.protobuf.Timestamp create_time = 3;
+
+ // Time when the internal issue was marked as resolved.
+ // Output only.
+ google.protobuf.Timestamp resolve_time = 4;
+
+ // Time when the internal issue was last updated.
+ // Output only.
+ google.protobuf.Timestamp update_time = 5;
+}
+
+// A message that contains mapping of a user and their role under a support
+// account.
+message SupportRole {
+ // A role which determines the support resources and features a user might
+ // get access to.
+ enum Role {
+ // An unknown role.
+ ROLE_UNSPECIFIED = 0;
+
+ // The basic support role.
+ BASIC = 1;
+
+ // The developer role.
+ DEVELOPER = 2;
+
+ // The operation role.
+ OPERATION = 3;
+
+ // The site reliability role.
+ SITE_RELIABILITY = 4;
+ }
+
+ // Email address of user being added through this Role.
+ string email = 1;
+
+ // The type of role assigned to user.
+ Role role = 2;
+}
+
+// The comment text associated with a `Case`.
+message Comment {
+ // Text containing a maximum of 3000 characters.
+ string text = 1;
+
+ // Time when this update was created.
+ // Output only.
+ google.protobuf.Timestamp create_time = 2;
+
+ // The email address/name of user who created this comment.
+ // Output only.
+ string author = 3;
+
+ // The resource name for this comment in format
+ // `supportAccounts/{account_id}/cases/{case_id}/{comment_id}`.
+ // Output only.
+ string name = 4;
+}
+
+// Represents the product component taxonomy that is to be used while creating
+// or updating a `Case`. A client should obtain the list of issue categories
+// and components/subcomponents from this object and specify them in the
+// `Case.category`, `Case.component`, and `Case.subcomponent` fields respectively.
+message IssueTaxonomy {
+ // The representation of a product component. It is composed of a canonical
+ // name for the product (e.g., Google App Engine), languages in which a
+  // support ticket can be created under this component, and a template that
+  // provides hints on important details to be filled out before submitting a
+  // case. It also contains an embedded list of product subcomponents that have
+  // similar attributes to top-level components
+  // (e.g., Google App Engine > Memcache).
+ message Component {
+ // User friendly name of this component.
+ string display_name = 1;
+
+ // List of languages in which a support case can be created under this
+ // component. Represented by language codes in ISO_639-1 standard.
+ repeated string languages = 2;
+
+ // Template to be used while filling the description of a support case.
+ string template = 3;
+
+ // List of subcomponents under this component.
+ repeated Component subcomponents = 4;
+ }
+
+ // Represents the category of issue (Technical or Non-Technical)
+ // reported through a support case.
+ message Category {
+ // User friendly name of this category.
+ string display_name = 1;
+
+ // Map of product components under this category.
+ map<string, Component> components = 2;
+ }
+
+ // Map of available categories.
+ map<string, Category> categories = 1;
+}
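+
+// Illustrative use of the taxonomy (the names below are examples only): pick a
+// category and a component/subcomponent from this object, then set the
+// corresponding `Case` fields when creating the case, e.g.
+//
+//   category: "Technical"
+//   component: "Google App Engine"
+//   subcomponent: "Memcache"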
diff --git a/third_party/googleapis/google/cloud/support/v1alpha1/cloud_support.proto b/third_party/googleapis/google/cloud/support/v1alpha1/cloud_support.proto
new file mode 100644
index 0000000000..101298c96f
--- /dev/null
+++ b/third_party/googleapis/google/cloud/support/v1alpha1/cloud_support.proto
@@ -0,0 +1,199 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.support.v1alpha1;
+
+import "google/api/annotations.proto";
+import "google/cloud/support/common.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/support/v1alpha1;support";
+option java_outer_classname = "CloudSupportProto";
+option java_package = "com.google.cloud.support.v1alpha1";
+
+
+// Retrieves the list of Google Cloud Platform Support accounts and manages
+// support cases associated with them.
+service CloudSupport {
+ // Retrieves the support account details given an account identifier.
+ // The authenticated user calling this method must be the account owner.
+ rpc GetSupportAccount(GetSupportAccountRequest) returns (google.cloud.support.common.SupportAccount) {
+ option (google.api.http) = { get: "/v1alpha1/{name=supportAccounts/*}" };
+ }
+
+ // Retrieves the list of accounts the current authenticated user has access
+ // to.
+ rpc ListSupportAccounts(ListSupportAccountsRequest) returns (ListSupportAccountsResponse) {
+ option (google.api.http) = { get: "/v1alpha1/supportAccounts" };
+ }
+
+ // Retrieves the details for a support case. The current authenticated user
+ // calling this method must have permissions to view this case.
+ rpc GetCase(GetCaseRequest) returns (google.cloud.support.common.Case) {
+ option (google.api.http) = { get: "/v1alpha1/{name=supportAccounts/*/cases/*}" };
+ }
+
+ // Retrieves the list of support cases associated with an account. The current
+ // authenticated user must have the permission to list and view these cases.
+ rpc ListCases(ListCasesRequest) returns (ListCasesResponse) {
+ option (google.api.http) = { get: "/v1alpha1/{name=supportAccounts/*}/cases" };
+ }
+
+ // Lists all comments from a case.
+ rpc ListComments(ListCommentsRequest) returns (ListCommentsResponse) {
+ option (google.api.http) = { get: "/v1alpha1/{name=supportAccounts/*/cases/*}/comments" };
+ }
+
+ // Creates a case and associates it with a
+  // [SupportAccount][google.cloud.support.v1alpha2.SupportAccount]. The
+ // authenticated user attempting this action must have permissions to create a
+ // `Case` under that [SupportAccount].
+ rpc CreateCase(CreateCaseRequest) returns (google.cloud.support.common.Case) {
+ option (google.api.http) = { post: "/v1alpha1/{parent=supportAccounts/*}/cases" body: "case" };
+ }
+
+ // Updates a support case. Only a small set of details (priority, subject and
+ // cc_address) can be updated after a case is created.
+ rpc UpdateCase(UpdateCaseRequest) returns (google.cloud.support.common.Case) {
+ option (google.api.http) = { patch: "/v1alpha1/{case.name=supportAccounts/*/cases/*}" body: "case" };
+ }
+
+ // Adds a new comment to a case.
+ rpc CreateComment(CreateCommentRequest) returns (google.cloud.support.common.Comment) {
+ option (google.api.http) = { post: "/v1alpha1/{name=supportAccounts/*/cases/*}/comments" body: "comment" };
+ }
+
+ // Retrieves the taxonomy of product categories and components to be used
+ // while creating a support case.
+ rpc GetIssueTaxonomy(GetIssueTaxonomyRequest) returns (google.cloud.support.common.IssueTaxonomy) {
+ option (google.api.http) = { get: "/v1alpha1:getIssueTaxonomy" };
+ }
+}
+
+// The request message for `GetSupportAccount`.
+message GetSupportAccountRequest {
+ // The resource name of the support account. For example:
+ // `supportAccounts/accountA`.
+ string name = 1;
+}
+
+// The request message for `ListSupportAccounts`.
+message ListSupportAccountsRequest {
+ // The filter applied to search results. It only supports filtering a support
+ // account list by a cloud_resource. For example, to filter results by support
+ // accounts associated with an Organization, its value should be:
+ // "cloud_resource:organizations/<organization_id>"
+ string filter = 1;
+
+ // Maximum number of accounts fetched with each request.
+ int64 page_size = 2;
+
+ // A token identifying the page of results to return. If unspecified, the
+ // first page is retrieved.
+ string page_token = 3;
+}
+
+// The response message for `ListSupportAccounts`.
+message ListSupportAccountsResponse {
+ // A list of support accounts.
+ repeated google.cloud.support.common.SupportAccount accounts = 1;
+
+ // A token to retrieve the next page of results. This should be passed in the
+ // `page_token` field of `ListSupportAccountsRequest` for the next request. If
+ // unspecified, there are no more results to retrieve.
+ string next_page_token = 2;
+}
+
+// The request message for `GetCase` method.
+message GetCaseRequest {
+ // Name of case resource requested.
+ // For example: "supportAccounts/accountA/cases/123"
+ string name = 1;
+}
+
+// The request message for `ListCases` method.
+message ListCasesRequest {
+ // Name of the account resource for which cases are requested. For example:
+ // "supportAccounts/accountA"
+ string name = 1;
+
+ // The filter applied to the search results. Currently it only accepts "OPEN"
+ // or "CLOSED" strings, filtering out cases that are open or resolved.
+ string filter = 2;
+
+ // Maximum number of cases fetched with each request.
+ int64 page_size = 3;
+
+ // A token identifying the page of results to return. If unspecified, the
+ // first page is retrieved.
+ string page_token = 4;
+}
+
+// The response message for `ListCases` method.
+message ListCasesResponse {
+ // A list of cases.
+ repeated google.cloud.support.common.Case cases = 1;
+
+ // A token to retrieve the next page of results. This should be passed in the
+ // `page_token` field of `ListCasesRequest` for the next request. If unspecified,
+ // there are no more results to retrieve.
+ string next_page_token = 2;
+}
+
+// The request message for `ListComments` method.
+message ListCommentsRequest {
+ // The resource name of the case for which comments should be listed.
+ string name = 1;
+}
+
+// The response message for `ListComments` method.
+message ListCommentsResponse {
+ // A list of comments.
+ repeated google.cloud.support.common.Comment comments = 1;
+}
+
+// The request message for `CreateCase` method.
+message CreateCaseRequest {
+ // The resource name for `SupportAccount` under which this case is created.
+ string parent = 1;
+
+ // The case resource to create.
+ google.cloud.support.common.Case case = 2;
+}
+
+// The request message for `UpdateCase` method.
+message UpdateCaseRequest {
+ // The case resource to update.
+ google.cloud.support.common.Case case = 1;
+
+ // A field that represents attributes of a Case object that should be updated
+ // as part of this request.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// The request message for `CreateComment` method.
+message CreateCommentRequest {
+ // The resource name of the case to which this comment should be added.
+ string name = 1;
+
+ // The `Comment` to be added to this case.
+ google.cloud.support.common.Comment comment = 2;
+}
+
+// The request message for `GetIssueTaxonomy` method.
+message GetIssueTaxonomyRequest {
+}
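The list methods above follow the usual page_size / page_token / next_page_token pagination contract. Below is a minimal sketch of draining that loop in Python, assuming the file has been compiled with grpcio-tools into cloud_support_pb2 and cloud_support_pb2_grpc (the module names and channel setup are assumptions for illustration, not part of this change):

import grpc
from google.cloud.support.v1alpha1 import cloud_support_pb2, cloud_support_pb2_grpc

def list_open_cases(channel, account_name):
    # account_name is e.g. "supportAccounts/accountA".
    stub = cloud_support_pb2_grpc.CloudSupportStub(channel)
    page_token = ""
    while True:
        response = stub.ListCases(cloud_support_pb2.ListCasesRequest(
            name=account_name,
            filter="OPEN",        # "OPEN" or "CLOSED", per the `filter` comment
            page_size=50,
            page_token=page_token,
        ))
        for case in response.cases:
            yield case
        page_token = response.next_page_token
        if not page_token:        # an empty token means there are no more pages
            break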
diff --git a/third_party/googleapis/google/cloud/vision/v1/geometry.proto b/third_party/googleapis/google/cloud/vision/v1/geometry.proto
new file mode 100644
index 0000000000..5586c2eb3a
--- /dev/null
+++ b/third_party/googleapis/google/cloud/vision/v1/geometry.proto
@@ -0,0 +1,54 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "GeometryProto";
+option java_package = "com.google.cloud.vision.v1";
+
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A bounding polygon for the detected image annotation.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+}
+
+// A 3D position in the image, used primarily for Face detection landmarks.
+// A valid Position must have both x and y coordinates.
+// The position coordinates are in the same scale as the original image.
+message Position {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+
+ // Z coordinate (or depth).
+ float z = 3;
+}
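The three messages above are plain data holders. As a small sketch, an axis-aligned rectangle can be expressed as a BoundingPoly like this, assuming a protoc-generated geometry_pb2 module (the module path is an assumption):

from google.cloud.vision.v1 import geometry_pb2

def rect(left, top, right, bottom):
    # Vertices are in image-pixel scale, listed clockwise from the top-left corner.
    return geometry_pb2.BoundingPoly(vertices=[
        geometry_pb2.Vertex(x=left, y=top),
        geometry_pb2.Vertex(x=right, y=top),
        geometry_pb2.Vertex(x=right, y=bottom),
        geometry_pb2.Vertex(x=left, y=bottom),
    ])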
diff --git a/third_party/googleapis/google/cloud/vision/v1/image_annotator.proto b/third_party/googleapis/google/cloud/vision/v1/image_annotator.proto
new file mode 100644
index 0000000000..c17f8aeb6f
--- /dev/null
+++ b/third_party/googleapis/google/cloud/vision/v1/image_annotator.proto
@@ -0,0 +1,569 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1/geometry.proto";
+import "google/cloud/vision/v1/text_annotation.proto";
+import "google/cloud/vision/v1/web_detection.proto";
+import "google/rpc/status.proto";
+import "google/type/color.proto";
+import "google/type/latlng.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ImageAnnotatorProto";
+option java_package = "com.google.cloud.vision.v1";
+
+
+// Service that performs Google Cloud Vision API detection tasks over client
+// images, such as face, landmark, logo, label, and text detection. The
+// ImageAnnotator service returns detected entities from the images.
+service ImageAnnotator {
+ // Run image detection and annotation for a batch of images.
+ rpc BatchAnnotateImages(BatchAnnotateImagesRequest) returns (BatchAnnotateImagesResponse) {
+ option (google.api.http) = { post: "/v1/images:annotate" body: "*" };
+ }
+}
+
+// Users describe the type of Google Cloud Vision API tasks to perform over
+// images by using *Feature*s. Each Feature indicates a type of image
+// detection task to perform. Features encode the Cloud Vision API
+// vertical to operate on and the number of top-scoring results to return.
+message Feature {
+ // Type of image feature.
+ enum Type {
+ // Unspecified feature type.
+ TYPE_UNSPECIFIED = 0;
+
+ // Run face detection.
+ FACE_DETECTION = 1;
+
+ // Run landmark detection.
+ LANDMARK_DETECTION = 2;
+
+ // Run logo detection.
+ LOGO_DETECTION = 3;
+
+ // Run label detection.
+ LABEL_DETECTION = 4;
+
+ // Run OCR.
+ TEXT_DETECTION = 5;
+
+ // Run dense text document OCR. Takes precedence when both
+ // DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present.
+ DOCUMENT_TEXT_DETECTION = 11;
+
+ // Run computer vision models to compute image safe-search properties.
+ SAFE_SEARCH_DETECTION = 6;
+
+ // Compute a set of image properties, such as the image's dominant colors.
+ IMAGE_PROPERTIES = 7;
+
+ // Run crop hints.
+ CROP_HINTS = 9;
+
+ // Run web detection.
+ WEB_DETECTION = 10;
+ }
+
+ // The feature type.
+ Type type = 1;
+
+ // Maximum number of results of this type.
+ int32 max_results = 2;
+}
+
+// External image source (Google Cloud Storage image location).
+message ImageSource {
+ // NOTE: For new code `image_uri` below is preferred.
+ // Google Cloud Storage image URI, which must be in the following form:
+ // `gs://bucket_name/object_name` (for details, see
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris)).
+ // NOTE: Cloud Storage object versioning is not supported.
+ string gcs_image_uri = 1;
+
+ // Image URI which supports:
+ // 1) Google Cloud Storage image URI, which must be in the following form:
+ // `gs://bucket_name/object_name` (for details, see
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris)).
+ // NOTE: Cloud Storage object versioning is not supported.
+ // 2) Publicly accessible image HTTP/HTTPS URL.
+ // This is preferred over the legacy `gcs_image_uri` above. When both
+ // `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ // precedence.
+ string image_uri = 2;
+}
+
+// Client image to perform Google Cloud Vision API tasks over.
+message Image {
+ // Image content, represented as a stream of bytes.
+ // Note: as with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // Google Cloud Storage image location. If both `content` and `source`
+ // are provided for an image, `content` takes precedence and is
+ // used to perform the image annotation request.
+ ImageSource source = 2;
+}
+
+// A face annotation object contains the results of face detection.
+message FaceAnnotation {
+ // A face-specific landmark (for example, a face feature).
+ // Landmark positions may fall outside the bounds of the image
+ // if the face is near one or more edges of the image.
+ // Therefore it is NOT guaranteed that `0 <= x < width` or
+ // `0 <= y < height`.
+ message Landmark {
+ // Face landmark (feature) type.
+ // Left and right are defined from the vantage of the viewer of the image
+ // without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ // typically, is the person's right eye.
+ enum Type {
+ // Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0;
+
+ // Left eye.
+ LEFT_EYE = 1;
+
+ // Right eye.
+ RIGHT_EYE = 2;
+
+ // Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3;
+
+ // Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4;
+
+ // Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5;
+
+ // Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6;
+
+ // Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7;
+
+ // Nose tip.
+ NOSE_TIP = 8;
+
+ // Upper lip.
+ UPPER_LIP = 9;
+
+ // Lower lip.
+ LOWER_LIP = 10;
+
+ // Mouth left.
+ MOUTH_LEFT = 11;
+
+ // Mouth right.
+ MOUTH_RIGHT = 12;
+
+ // Mouth center.
+ MOUTH_CENTER = 13;
+
+ // Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14;
+
+ // Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15;
+
+ // Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16;
+
+ // Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17;
+
+ // Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18;
+
+ // Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19;
+
+ // Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20;
+
+ // Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21;
+
+ // Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22;
+
+ // Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23;
+
+ // Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24;
+
+ // Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25;
+
+ // Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26;
+
+ // Left ear tragion.
+ LEFT_EAR_TRAGION = 27;
+
+ // Right ear tragion.
+ RIGHT_EAR_TRAGION = 28;
+
+ // Left eye pupil.
+ LEFT_EYE_PUPIL = 29;
+
+ // Right eye pupil.
+ RIGHT_EYE_PUPIL = 30;
+
+ // Forehead glabella.
+ FOREHEAD_GLABELLA = 31;
+
+ // Chin gnathion.
+ CHIN_GNATHION = 32;
+
+ // Chin left gonion.
+ CHIN_LEFT_GONION = 33;
+
+ // Chin right gonion.
+ CHIN_RIGHT_GONION = 34;
+ }
+
+ // Face landmark type.
+ Type type = 3;
+
+ // Face landmark position.
+ Position position = 4;
+ }
+
+ // The bounding polygon around the face. The coordinates of the bounding box
+ // are in the original image's scale, as returned in `ImageParams`.
+ // The bounding box is computed to "frame" the face in accordance with human
+ // expectations. It is based on the landmarker results.
+ // Note that one or more x and/or y coordinates may not be generated in the
+ // `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ // appears in the image to be annotated.
+ BoundingPoly bounding_poly = 1;
+
+ // The `fd_bounding_poly` bounding polygon is tighter than the
+ // `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ // is used to eliminate the face from any image analysis that detects the
+ // "amount of skin" visible in an image. It is not based on the
+ // landmarker results, only on the initial face detection, hence
+ // the <code>fd</code> (face detection) prefix.
+ BoundingPoly fd_bounding_poly = 2;
+
+ // Detected face landmarks.
+ repeated Landmark landmarks = 3;
+
+ // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ // of the face relative to the image vertical about the axis perpendicular to
+ // the face. Range [-180,180].
+ float roll_angle = 4;
+
+ // Yaw angle, which indicates the leftward/rightward angle that the face is
+ // pointing relative to the vertical plane perpendicular to the image. Range
+ // [-180,180].
+ float pan_angle = 5;
+
+ // Pitch angle, which indicates the upwards/downwards angle that the face is
+ // pointing relative to the image's horizontal plane. Range [-180,180].
+ float tilt_angle = 6;
+
+ // Detection confidence. Range [0, 1].
+ float detection_confidence = 7;
+
+ // Face landmarking confidence. Range [0, 1].
+ float landmarking_confidence = 8;
+
+ // Joy likelihood.
+ Likelihood joy_likelihood = 9;
+
+ // Sorrow likelihood.
+ Likelihood sorrow_likelihood = 10;
+
+ // Anger likelihood.
+ Likelihood anger_likelihood = 11;
+
+ // Surprise likelihood.
+ Likelihood surprise_likelihood = 12;
+
+ // Under-exposed likelihood.
+ Likelihood under_exposed_likelihood = 13;
+
+ // Blurred likelihood.
+ Likelihood blurred_likelihood = 14;
+
+ // Headwear likelihood.
+ Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+ // lat/long location coordinates.
+ google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+ // Name of the property.
+ string name = 1;
+
+ // Value of the property.
+ string value = 2;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
+ string mid = 1;
+
+ // The language code for the locale in which the entity textual
+ // `description` is expressed.
+ string locale = 2;
+
+ // Entity textual description, expressed in its `locale` language.
+ string description = 3;
+
+ // Overall score of the result. Range [0, 1].
+ float score = 4;
+
+ // The accuracy of the entity detection in an image.
+ // For example, for an image in which the "Eiffel Tower" entity is detected,
+ // this field represents the confidence that there is a tower in the query
+ // image. Range [0, 1].
+ float confidence = 5;
+
+ // The relevancy of the ICA (Image Content Annotation) label to the
+ // image. For example, the relevancy of "tower" is likely higher to an image
+ // containing the detected "Eiffel Tower" than to an image containing a
+ // detected distant towering building, even though the confidence that
+ // there is a tower in each image may be the same. Range [0, 1].
+ float topicality = 6;
+
+ // Image region to which this entity belongs. Currently not produced
+ // for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
+ // are produced for the entire text detected in an image region, followed by
+ // `boundingPoly`s for each word within the detected text.
+ BoundingPoly bounding_poly = 7;
+
+ // The location information for the detected entity. Multiple
+ // `LocationInfo` elements can be present because one location may
+ // indicate the location of the scene in the image, and another location
+ // may indicate the location of the place where the image was taken.
+ // Location information is usually present for landmarks.
+ repeated LocationInfo locations = 8;
+
+ // Some entities may have optional user-supplied `Property` (name/value)
+ // fields, such a score or string that qualifies the entity.
+ repeated Property properties = 9;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+ // Represents the adult content likelihood for the image.
+ Likelihood adult = 1;
+
+ // Spoof likelihood. The likelihood that a modification
+ // was made to the image's canonical version to make it appear
+ // funny or offensive.
+ Likelihood spoof = 2;
+
+ // Likelihood that this is a medical image.
+ Likelihood medical = 3;
+
+ // Violence likelihood.
+ Likelihood violence = 4;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+ // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1;
+
+ // Max lat/long pair.
+ google.type.LatLng max_lat_lng = 2;
+}
+
+// Color information consists of RGB channels, score, and the fraction of
+// the image that the color occupies in the image.
+message ColorInfo {
+ // RGB components of the color.
+ google.type.Color color = 1;
+
+ // Image-specific score for this color. Value in range [0, 1].
+ float score = 2;
+
+ // The fraction of pixels the color occupies in the image.
+ // Value in range [0, 1].
+ float pixel_fraction = 3;
+}
+
+// Set of dominant colors and their corresponding scores.
+message DominantColorsAnnotation {
+ // RGB color values with their score and pixel fraction.
+ repeated ColorInfo colors = 1;
+}
+
+// Stores image properties, such as dominant colors.
+message ImageProperties {
+ // If present, dominant colors completed successfully.
+ DominantColorsAnnotation dominant_colors = 1;
+}
+
+// Single crop hint that is used to generate a new crop when serving an image.
+message CropHint {
+ // The bounding polygon for the crop region. The coordinates of the bounding
+ // box are in the original image's scale, as returned in `ImageParams`.
+ BoundingPoly bounding_poly = 1;
+
+ // Confidence of this being a salient region. Range [0, 1].
+ float confidence = 2;
+
+ // Fraction of importance of this salient region with respect to the original
+ // image.
+ float importance_fraction = 3;
+}
+
+// Set of crop hints that are used to generate new crops when serving images.
+message CropHintsAnnotation {
+ repeated CropHint crop_hints = 1;
+}
+
+// Parameters for crop hints annotation request.
+message CropHintsParams {
+ // Aspect ratios in floats, representing the ratio of the width to the height
+ // of the image. For example, if the desired aspect ratio is 4/3, the
+ // corresponding float value should be 1.33333. If not specified, the
+ // best possible crop is returned. The number of provided aspect ratios is
+ // limited to a maximum of 16; any aspect ratios provided after the 16th are
+ // ignored.
+ repeated float aspect_ratios = 1;
+}
+
+// Image context and/or feature-specific parameters.
+message ImageContext {
+ // lat/long rectangle that specifies the location of the image.
+ LatLongRect lat_long_rect = 1;
+
+ // List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ // yields the best results since it enables automatic language detection. For
+ // languages based on the Latin alphabet, setting `language_hints` is not
+ // needed. In rare cases, when the language of the text in the image is known,
+ // setting a hint will help get better results (although it will be a
+ // significant hindrance if the hint is wrong). Text detection returns an
+ // error if one or more of the specified languages is not one of the
+ // [supported languages](/vision/docs/languages).
+ repeated string language_hints = 2;
+
+ // Parameters for crop hints annotation request.
+ CropHintsParams crop_hints_params = 4;
+}
+
+// Request for performing Google Cloud Vision API tasks over a user-provided
+// image, with user-requested features.
+message AnnotateImageRequest {
+ // The image to be processed.
+ Image image = 1;
+
+ // Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image.
+ ImageContext image_context = 3;
+}
+
+// Response to an image annotation request.
+message AnnotateImageResponse {
+ // If present, face detection has completed successfully.
+ repeated FaceAnnotation face_annotations = 1;
+
+ // If present, landmark detection has completed successfully.
+ repeated EntityAnnotation landmark_annotations = 2;
+
+ // If present, logo detection has completed successfully.
+ repeated EntityAnnotation logo_annotations = 3;
+
+ // If present, label detection has completed successfully.
+ repeated EntityAnnotation label_annotations = 4;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ repeated EntityAnnotation text_annotations = 5;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ // This annotation provides the structural hierarchy for the OCR detected
+ // text.
+ TextAnnotation full_text_annotation = 12;
+
+ // If present, safe-search annotation has completed successfully.
+ SafeSearchAnnotation safe_search_annotation = 6;
+
+ // If present, image properties were extracted successfully.
+ ImageProperties image_properties_annotation = 8;
+
+ // If present, crop hints have completed successfully.
+ CropHintsAnnotation crop_hints_annotation = 11;
+
+ // If present, web detection has completed successfully.
+ WebDetection web_detection = 13;
+
+ // If set, represents the error message for the operation.
+ // Note that filled-in image annotations are guaranteed to be
+ // correct, even when `error` is set.
+ google.rpc.Status error = 9;
+}
+
+// Multiple image annotation requests are batched into a single service call.
+message BatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+}
+
+// Response to a batch image annotation request.
+message BatchAnnotateImagesResponse {
+ // Individual responses to image annotation requests within the batch.
+ repeated AnnotateImageResponse responses = 1;
+}
+
+// A bucketized representation of likelihood, which is intended to give clients
+// highly stable results across model upgrades.
+enum Likelihood {
+ // Unknown likelihood.
+ UNKNOWN = 0;
+
+ // It is very unlikely that the image belongs to the specified vertical.
+ VERY_UNLIKELY = 1;
+
+ // It is unlikely that the image belongs to the specified vertical.
+ UNLIKELY = 2;
+
+ // It is possible that the image belongs to the specified vertical.
+ POSSIBLE = 3;
+
+ // It is likely that the image belongs to the specified vertical.
+ LIKELY = 4;
+
+ // It is very likely that the image belongs to the specified vertical.
+ VERY_LIKELY = 5;
+}
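A BatchAnnotateImagesRequest bundles one AnnotateImageRequest per image, each carrying the image reference plus the requested Feature types; the response mirrors the batch order. A minimal sketch of that round trip, assuming protoc/grpcio-generated image_annotator_pb2 and image_annotator_pb2_grpc modules (module names and channel setup are assumptions):

import grpc
from google.cloud.vision.v1 import image_annotator_pb2, image_annotator_pb2_grpc

def annotate(channel, image_uri):
    stub = image_annotator_pb2_grpc.ImageAnnotatorStub(channel)
    request = image_annotator_pb2.BatchAnnotateImagesRequest(requests=[
        image_annotator_pb2.AnnotateImageRequest(
            image=image_annotator_pb2.Image(
                source=image_annotator_pb2.ImageSource(image_uri=image_uri)),
            features=[
                image_annotator_pb2.Feature(
                    type=image_annotator_pb2.Feature.LABEL_DETECTION, max_results=5),
                image_annotator_pb2.Feature(
                    type=image_annotator_pb2.Feature.SAFE_SEARCH_DETECTION),
            ])])
    for result in stub.BatchAnnotateImages(request).responses:
        if result.error.code:  # google.rpc.Status; code 0 means success
            print("annotation failed:", result.error.message)
            continue
        for label in result.label_annotations:
            print(label.description, label.score)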
diff --git a/third_party/googleapis/google/cloud/vision/v1/text_annotation.proto b/third_party/googleapis/google/cloud/vision/v1/text_annotation.proto
new file mode 100644
index 0000000000..938820a3a0
--- /dev/null
+++ b/third_party/googleapis/google/cloud/vision/v1/text_annotation.proto
@@ -0,0 +1,237 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1";
+
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have its own
+// properties. Properties describe detected languages, breaks, etc. Please
+// refer to the [google.cloud.vision.v1.TextAnnotation.TextProperty][google.cloud.vision.v1.TextAnnotation.TextProperty] message
+// definition below for more detail.
+message TextAnnotation {
+ // Detected language for a structural component.
+ message DetectedLanguage {
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 1;
+
+ // Confidence of detected language. Range [0, 1].
+ float confidence = 2;
+ }
+
+ // Detected start or end of a structural component.
+ message DetectedBreak {
+ // Enum to denote the type of break found. New line, space etc.
+ enum BreakType {
+ // Unknown break label type.
+ UNKNOWN = 0;
+
+ // Regular space.
+ SPACE = 1;
+
+ // Sure space (very wide).
+ SURE_SPACE = 2;
+
+ // Line-wrapping break.
+ EOL_SURE_SPACE = 3;
+
+ // End-line hyphen that is not present in text; does not co-occur with
+ // SPACE, LEADER_SPACE, or LINE_BREAK.
+ HYPHEN = 4;
+
+ // Line break that ends a paragraph.
+ LINE_BREAK = 5;
+ }
+
+ BreakType type = 1;
+
+ // True if break prepends the element.
+ bool is_prefix = 2;
+ }
+
+ // Additional information detected on the structural component.
+ message TextProperty {
+ // A list of detected languages together with confidence.
+ repeated DetectedLanguage detected_languages = 1;
+
+ // Detected start or end of a text segment.
+ DetectedBreak detected_break = 2;
+ }
+
+ // List of pages detected by OCR.
+ repeated Page pages = 1;
+
+ // UTF-8 text detected on the pages.
+ string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+ // Additional information detected on the page.
+ TextAnnotation.TextProperty property = 1;
+
+ // Page width in pixels.
+ int32 width = 2;
+
+ // Page height in pixels.
+ int32 height = 3;
+
+ // List of blocks of text, images etc on this page.
+ repeated Block blocks = 4;
+}
+
+// Logical element on the page.
+message Block {
+ // Type of a block (text, image etc) as identified by OCR.
+ enum BlockType {
+ // Unknown block type.
+ UNKNOWN = 0;
+
+ // Regular text block.
+ TEXT = 1;
+
+ // Table block.
+ TABLE = 2;
+
+ // Image block.
+ PICTURE = 3;
+
+ // Horizontal/vertical line box.
+ RULER = 4;
+
+ // Barcode block.
+ BARCODE = 5;
+ }
+
+ // Additional information detected for the block.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the block.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of paragraphs in this block (if this block is of type text).
+ repeated Paragraph paragraphs = 3;
+
+ // Detected block type (text, image etc) for this block.
+ BlockType block_type = 4;
+}
+
+// Structural unit of text representing a number of words in certain order.
+message Paragraph {
+ // Additional information detected for the paragraph.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the paragraph.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of words in this paragraph.
+ repeated Word words = 3;
+}
+
+// A word representation.
+message Word {
+ // Additional information detected for the word.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the word.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of symbols in the word.
+ // The order of the symbols follows the natural reading order.
+ repeated Symbol symbols = 3;
+}
+
+// A single symbol representation.
+message Symbol {
+ // Additional information detected for the symbol.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the symbol.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // The actual UTF-8 representation of the symbol.
+ string text = 3;
+}
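The hierarchy described at the top of this file (TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol) maps onto nested repeated fields, so recovering per-word text is a matter of walking those lists. A short sketch, assuming `annotation` is a parsed TextAnnotation message from an annotation response:

def iter_words(annotation):
    # annotation is a google.cloud.vision.v1.TextAnnotation message.
    for page in annotation.pages:
        for block in page.blocks:
            for paragraph in block.paragraphs:
                for word in paragraph.words:
                    text = "".join(symbol.text for symbol in word.symbols)
                    yield text, word.bounding_box

Detected breaks (spaces, line breaks, hyphens) are carried in each element's TextProperty, e.g. word.property.detected_break.type.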
diff --git a/third_party/googleapis/google/cloud/vision/v1/vision_gapic.yaml b/third_party/googleapis/google/cloud/vision/v1/vision_gapic.yaml
new file mode 100644
index 0000000000..3a5f6484e3
--- /dev/null
+++ b/third_party/googleapis/google/cloud/vision/v1/vision_gapic.yaml
@@ -0,0 +1,58 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.vision.spi.v1
+ python:
+ package_name: google.cloud.gapic.vision.v1
+ go:
+ package_name: cloud.google.com/go/vision/apiv1
+ domain_layer_location: cloud.google.com/go/vision
+ csharp:
+ package_name: Google.Cloud.Vision.V1
+ ruby:
+ package_name: Google::Cloud::Vision::V1
+ php:
+ package_name: Google\Cloud\Vision\V1
+ nodejs:
+ package_name: vision.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.cloud.vision.v1.ImageAnnotator
+ smoke_test:
+ method: BatchAnnotateImages
+ init_fields:
+ - requests[0].image.source.gcs_image_uri="gs://gapic-toolkit/President_Barack_Obama.jpg"
+ - requests[0].features[0].type=FACE_DETECTION
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: BatchAnnotateImages
+ flattening:
+ groups:
+ - parameters:
+ - requests
+ required_fields:
+ - requests
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
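The retry_params_def block above encodes a capped exponential backoff: the delay between attempts starts at initial_retry_delay_millis, grows by retry_delay_multiplier, and is clamped at max_retry_delay_millis (the rpc_timeout_* values are handled analogously, up to total_timeout_millis overall). A standalone sketch of the delay schedule those numbers imply, not GAPIC-generated code:

def retry_delays_ms(initial=100, multiplier=1.3, cap=60000, attempts=8):
    delay = float(initial)
    for _ in range(attempts):
        yield min(delay, cap)   # wait this long before the next attempt
        delay *= multiplier

# list(retry_delays_ms(attempts=4)) -> roughly [100.0, 130.0, 169.0, 219.7]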
diff --git a/third_party/googleapis/google/cloud/vision/v1/web_detection.proto b/third_party/googleapis/google/cloud/vision/v1/web_detection.proto
new file mode 100644
index 0000000000..6da89756ee
--- /dev/null
+++ b/third_party/googleapis/google/cloud/vision/v1/web_detection.proto
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1";
+
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+ // Entity deduced from similar images on the Internet.
+ message WebEntity {
+ // Opaque entity ID.
+ string entity_id = 1;
+
+ // Overall relevancy score for the entity.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+
+ // Canonical description of the entity, in English.
+ string description = 3;
+ }
+
+ // Metadata for online images.
+ message WebImage {
+ // The result image URL.
+ string url = 1;
+
+ // Overall relevancy score for the image.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+ }
+
+ // Metadata for web pages.
+ message WebPage {
+ // The result web page URL.
+ string url = 1;
+
+ // Overall relevancy score for the web page.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+ }
+
+ // Deduced entities from similar images on the Internet.
+ repeated WebEntity web_entities = 1;
+
+ // Fully matching images from the Internet.
+ // They're definite near-duplicates and most often a copy of the query image with
+ // merely a size change.
+ repeated WebImage full_matching_images = 2;
+
+ // Partial matching images from the Internet.
+ // Those images are similar enough to share some key-point features. For
+ // example, an original image will likely have partial matching for its crops.
+ repeated WebImage partial_matching_images = 3;
+
+ // Web pages containing the matching images from the Internet.
+ repeated WebPage pages_with_matching_images = 4;
+}
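Because the scores above are only comparable within a single query, a consumer typically just ranks them locally. A tiny illustrative helper, assuming `detection` is a WebDetection message taken from an annotation response:

def top_web_entities(detection, limit=5):
    # Rank deduced entities by their query-specific relevancy score.
    ranked = sorted(detection.web_entities, key=lambda e: e.score, reverse=True)
    return [(entity.description, entity.score) for entity in ranked[:limit]]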
diff --git a/third_party/googleapis/google/cloud/vision/vision.yaml b/third_party/googleapis/google/cloud/vision/vision.yaml
new file mode 100644
index 0000000000..77c3349ffe
--- /dev/null
+++ b/third_party/googleapis/google/cloud/vision/vision.yaml
@@ -0,0 +1,19 @@
+type: google.api.Service
+config_version: 2
+name: vision.googleapis.com
+title: Google Cloud Vision API
+
+apis:
+- name: google.cloud.vision.v1.ImageAnnotator
+
+documentation:
+ summary:
+ 'Integrates Google Vision features, including image labeling, face, logo,
+ and landmark detection, optical character recognition (OCR), and detection
+ of explicit content, into applications.'
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform
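The authentication rule above requires the cloud-platform OAuth scope for every method of the service. One way to obtain matching application-default credentials in Python is sketched below using the google-auth library; wiring the credentials into a gRPC channel is omitted:

import google.auth

# Application-default credentials carrying the scope required by vision.yaml.
credentials, project_id = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"])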
diff --git a/third_party/googleapis/google/container/v1/cluster_service.proto b/third_party/googleapis/google/container/v1/cluster_service.proto
new file mode 100644
index 0000000000..8215fa14dd
--- /dev/null
+++ b/third_party/googleapis/google/container/v1/cluster_service.proto
@@ -0,0 +1,986 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.container.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/container/v1;container";
+option java_multiple_files = true;
+option java_outer_classname = "ClusterServiceProto";
+option java_package = "com.google.container.v1";
+
+
+// Google Container Engine Cluster Manager v1
+service ClusterManager {
+ // Lists all clusters owned by a project in either the specified zone or all
+ // zones.
+ rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/zones/{zone}/clusters" };
+ }
+
+ // Gets the details of a specific cluster.
+ rpc GetCluster(GetClusterRequest) returns (Cluster) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}" };
+ }
+
+ // Creates a cluster, consisting of the specified number and type of Google
+ // Compute Engine instances.
+ //
+ // By default, the cluster is created in the project's
+ // [default network](/compute/docs/networks-and-firewalls#networks).
+ //
+ // One firewall is added for the cluster. After cluster creation,
+ // the cluster creates routes for each node to allow the containers
+ // on that node to communicate with all other instances in the
+ // cluster.
+ //
+ // Finally, an entry is added to the project's global metadata indicating
+ // which CIDR range is being used by the cluster.
+ rpc CreateCluster(CreateClusterRequest) returns (Operation) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/zones/{zone}/clusters" body: "*" };
+ }
+
+ // Updates the settings of a specific cluster.
+ rpc UpdateCluster(UpdateClusterRequest) returns (Operation) {
+ option (google.api.http) = { put: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}" body: "*" };
+ }
+
+ // Deletes the cluster, including the Kubernetes endpoint and all worker
+ // nodes.
+ //
+ // Firewalls and routes that were configured during cluster creation
+ // are also deleted.
+ //
+ // Other Google Compute Engine resources that might be in use by the cluster
+ // (e.g. load balancer resources) will not be deleted if they weren't present
+ // at the initial create time.
+ rpc DeleteCluster(DeleteClusterRequest) returns (Operation) {
+ option (google.api.http) = { delete: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}" };
+ }
+
+ // Lists all operations in a project in a specific zone or all zones.
+ rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/zones/{zone}/operations" };
+ }
+
+ // Gets the specified operation.
+ rpc GetOperation(GetOperationRequest) returns (Operation) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/zones/{zone}/operations/{operation_id}" };
+ }
+
+ // Cancels the specified operation.
+ rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/zones/{zone}/operations/{operation_id}:cancel" body: "*" };
+ }
+
+ // Returns configuration info about the Container Engine service.
+ rpc GetServerConfig(GetServerConfigRequest) returns (ServerConfig) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/zones/{zone}/serverconfig" };
+ }
+
+ // Lists the node pools for a cluster.
+ rpc ListNodePools(ListNodePoolsRequest) returns (ListNodePoolsResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools" };
+ }
+
+ // Retrieves the node pool requested.
+ rpc GetNodePool(GetNodePoolRequest) returns (NodePool) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}" };
+ }
+
+ // Creates a node pool for a cluster.
+ rpc CreateNodePool(CreateNodePoolRequest) returns (Operation) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools" body: "*" };
+ }
+
+ // Deletes a node pool from a cluster.
+ rpc DeleteNodePool(DeleteNodePoolRequest) returns (Operation) {
+ option (google.api.http) = { delete: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}" };
+ }
+
+ // Rolls back a previously Aborted or Failed NodePool upgrade.
+ // This is a no-op if the last upgrade completed successfully.
+ rpc RollbackNodePoolUpgrade(RollbackNodePoolUpgradeRequest) returns (Operation) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}:rollback" body: "*" };
+ }
+
+ // Sets the NodeManagement options for a node pool.
+ rpc SetNodePoolManagement(SetNodePoolManagementRequest) returns (Operation) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/setManagement" body: "*" };
+ }
+}
+
+// Parameters that describe the nodes in a cluster.
+message NodeConfig {
+ // The name of a Google Compute Engine [machine
+ // type](/compute/docs/machine-types) (e.g.
+ // `n1-standard-1`).
+ //
+ // If unspecified, the default machine type is
+ // `n1-standard-1`.
+ string machine_type = 1;
+
+ // Size of the disk attached to each node, specified in GB.
+ // The smallest allowed disk size is 10GB.
+ //
+ // If unspecified, the default disk size is 100GB.
+ int32 disk_size_gb = 2;
+
+ // The set of Google API scopes to be made available on all of the
+ // node VMs under the "default" service account.
+ //
+ // The following scopes are recommended, but not required, and by default are
+ // not included:
+ //
+ // * `https://www.googleapis.com/auth/compute` is required for mounting
+ // persistent storage on your nodes.
+ // * `https://www.googleapis.com/auth/devstorage.read_only` is required for
+ // communicating with **gcr.io**
+ // (the [Google Container Registry](/container-registry/)).
+ //
+ // If unspecified, no scopes are added, unless Cloud Logging or Cloud
+ // Monitoring are enabled, in which case their required scopes will be added.
+ repeated string oauth_scopes = 3;
+
+ // The Google Cloud Platform Service Account to be used by the node VMs. If
+ // no Service Account is specified, the "default" service account is used.
+ string service_account = 9;
+
+ // The metadata key/value pairs assigned to instances in the cluster.
+ //
+ // Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
+ // in length. These are reflected as part of a URL in the metadata server.
+ // Additionally, to avoid ambiguity, keys must not conflict with any other
+ // metadata keys for the project or be one of the four reserved keys:
+ // "instance-template", "kube-env", "startup-script", and "user-data"
+ //
+ // Values are free-form strings, and only have meaning as interpreted by
+ // the image running in the instance. The only restriction placed on them is
+ // that each value's size must be less than or equal to 32 KB.
+ //
+ // The total size of all keys and values must be less than 512 KB.
+ map<string, string> metadata = 4;
+
+ // The image type to use for this node. Note that for a given image type,
+ // the latest version of it will be used.
+ string image_type = 5;
+
+ // The map of Kubernetes labels (key/value pairs) to be applied to each node.
+ // These will be added in addition to any default label(s) that
+ // Kubernetes may apply to the node.
+ // In case of conflict in label keys, the applied set may differ depending on
+ // the Kubernetes version -- it's best to assume the behavior is undefined
+ // and conflicts should be avoided.
+ // For more information, including usage and the valid values, see:
+ // http://kubernetes.io/v1.1/docs/user-guide/labels.html
+ map<string, string> labels = 6;
+
+ // The number of local SSD disks to be attached to the node.
+ //
+ // The limit for this value is dependent upon the maximum number of
+ // disks available on a machine per zone. See:
+ // https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits
+ // for more information.
+ int32 local_ssd_count = 7;
+
+ // The list of instance tags applied to all nodes. Tags are used to identify
+ // valid sources or targets for network firewalls and are specified by
+ // the client during cluster or node pool creation. Each tag within the list
+ // must comply with RFC1035.
+ repeated string tags = 8;
+
+ // Whether the nodes are created as preemptible VM instances. See:
+ // https://cloud.google.com/compute/docs/instances/preemptible for more
+ // information about preemptible VM instances.
+ bool preemptible = 10;
+}
+
+// The authentication information for accessing the master endpoint.
+// Authentication can be done using HTTP basic auth or using client
+// certificates.
+message MasterAuth {
+ // The username to use for HTTP basic authentication to the master endpoint.
+ string username = 1;
+
+ // The password to use for HTTP basic authentication to the master endpoint.
+ // Because the master endpoint is open to the Internet, you should create a
+ // strong password.
+ string password = 2;
+
+ // [Output only] Base64-encoded public certificate that is the root of
+ // trust for the cluster.
+ string cluster_ca_certificate = 100;
+
+ // [Output only] Base64-encoded public certificate used by clients to
+ // authenticate to the cluster endpoint.
+ string client_certificate = 101;
+
+ // [Output only] Base64-encoded private key used by clients to authenticate
+ // to the cluster endpoint.
+ string client_key = 102;
+}
+
+// Configuration for the addons that can be automatically spun up in the
+// cluster, enabling additional functionality.
+message AddonsConfig {
+ // Configuration for the HTTP (L7) load balancing controller addon, which
+ // makes it easy to set up HTTP load balancers for services in a cluster.
+ HttpLoadBalancing http_load_balancing = 1;
+
+ // Configuration for the horizontal pod autoscaling feature, which
+ // increases or decreases the number of replica pods a replication controller
+ // has based on the resource usage of the existing pods.
+ HorizontalPodAutoscaling horizontal_pod_autoscaling = 2;
+}
+
+// Configuration options for the HTTP (L7) load balancing controller addon,
+// which makes it easy to set up HTTP load balancers for services in a cluster.
+message HttpLoadBalancing {
+ // Whether the HTTP Load Balancing controller is enabled in the cluster.
+ // When enabled, it runs a small pod in the cluster that manages the load
+ // balancers.
+ bool disabled = 1;
+}
+
+// Configuration options for the horizontal pod autoscaling feature, which
+// increases or decreases the number of replica pods a replication controller
+// has based on the resource usage of the existing pods.
+message HorizontalPodAutoscaling {
+ // Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
+ // When enabled, it ensures that a Heapster pod is running in the cluster,
+ // which is also used by the Cloud Monitoring service.
+ bool disabled = 1;
+}
+
+// A Google Container Engine cluster.
+message Cluster {
+ // The current status of the cluster.
+ enum Status {
+ // Not set.
+ STATUS_UNSPECIFIED = 0;
+
+ // The PROVISIONING state indicates the cluster is being created.
+ PROVISIONING = 1;
+
+ // The RUNNING state indicates the cluster has been created and is fully
+ // usable.
+ RUNNING = 2;
+
+ // The RECONCILING state indicates that some work is actively being done on
+ // the cluster, such as upgrading the master or node software. Details can
+ // be found in the `statusMessage` field.
+ RECONCILING = 3;
+
+ // The STOPPING state indicates the cluster is being deleted.
+ STOPPING = 4;
+
+ // The ERROR state indicates the cluster may be unusable. Details
+ // can be found in the `statusMessage` field.
+ ERROR = 5;
+ }
+
+ // The name of this cluster. The name must be unique within this project
+ // and zone, and can be up to 40 characters with the following restrictions:
+ //
+ // * Lowercase letters, numbers, and hyphens only.
+ // * Must start with a letter.
+ // * Must end with a number or a letter.
+ string name = 1;
+
+ // An optional description of this cluster.
+ string description = 2;
+
+ // The number of nodes to create in this cluster. You must ensure that your
+ // Compute Engine <a href="/compute/docs/resource-quotas">resource quota</a>
+ // is sufficient for this number of instances. You must also have available
+ // firewall and routes quota.
+ // For requests, this field should only be used in lieu of a
+ // "node_pool" object, since this configuration (along with the
+ // "node_config") will be used to create a "NodePool" object with an
+ // auto-generated name. Do not use this and a node_pool at the same time.
+ int32 initial_node_count = 3;
+
+ // Parameters used in creating the cluster's nodes.
+ // See `nodeConfig` for the description of its properties.
+ // For requests, this field should only be used in lieu of a
+ // "node_pool" object, since this configuration (along with the
+ // "initial_node_count") will be used to create a "NodePool" object with an
+ // auto-generated name. Do not use this and a node_pool at the same time.
+ // For responses, this field will be populated with the node configuration of
+ // the first node pool.
+ //
+ // If unspecified, the defaults are used.
+ NodeConfig node_config = 4;
+
+ // The authentication information for accessing the master endpoint.
+ MasterAuth master_auth = 5;
+
+ // The logging service the cluster should use to write logs.
+ // Currently available options:
+ //
+ // * `logging.googleapis.com` - the Google Cloud Logging service.
+ // * `none` - no logs will be exported from the cluster.
+ // * if left as an empty string, `logging.googleapis.com` will be used.
+ string logging_service = 6;
+
+ // The monitoring service the cluster should use to write metrics.
+ // Currently available options:
+ //
+ // * `monitoring.googleapis.com` - the Google Cloud Monitoring service.
+ // * `none` - no metrics will be exported from the cluster.
+ // * if left as an empty string, `monitoring.googleapis.com` will be used.
+ string monitoring_service = 7;
+
+ // The name of the Google Compute Engine
+ // [network](/compute/docs/networks-and-firewalls#networks) to which the
+ // cluster is connected. If left unspecified, the `default` network
+ // will be used.
+ string network = 8;
+
+ // The IP address range of the container pods in this cluster, in
+ // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
+ // notation (e.g. `10.96.0.0/14`). Leave blank to have
+ // one automatically chosen or specify a `/14` block in `10.0.0.0/8`.
+ string cluster_ipv4_cidr = 9;
+
+ // Configurations for the various addons available to run in the cluster.
+ AddonsConfig addons_config = 10;
+
+ // The name of the Google Compute Engine
+ // [subnetwork](/compute/docs/subnetworks) to which the
+ // cluster is connected.
+ string subnetwork = 11;
+
+ // The node pools associated with this cluster.
+ // This field should not be set if "node_config" or "initial_node_count" are
+ // specified.
+ repeated NodePool node_pools = 12;
+
+ // The list of Google Compute Engine
+ // [locations](/compute/docs/zones#available) in which the cluster's nodes
+ // should be located.
+ repeated string locations = 13;
+
+ // Kubernetes alpha features are enabled on this cluster. This includes alpha
+ // API groups (e.g. v1alpha1) and features that may not be production ready in
+ // the kubernetes version of the master and nodes.
+ // The cluster has no SLA for uptime and master/node upgrades are disabled.
+ // Alpha enabled clusters are automatically deleted thirty days after
+ // creation.
+ bool enable_kubernetes_alpha = 14;
+
+ // [Output only] Server-defined URL for the resource.
+ string self_link = 100;
+
+ // [Output only] The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 101;
+
+ // [Output only] The IP address of this cluster's master endpoint.
+ // The endpoint can be accessed from the internet at
+ // `https://username:password@endpoint/`.
+ //
+ // See the `masterAuth` property of this resource for username and
+ // password information.
+ string endpoint = 102;
+
+ // [Output only] The software version of the master endpoint and kubelets used
+ // in the cluster when it was first created. The version can be upgraded over
+ // time.
+ string initial_cluster_version = 103;
+
+ // [Output only] The current software version of the master endpoint.
+ string current_master_version = 104;
+
+ // [Output only] The current version of the node software components.
+ // If they are currently at multiple versions because they're in the process
+ // of being upgraded, this reflects the minimum version of all nodes.
+ string current_node_version = 105;
+
+ // [Output only] The time the cluster was created, in
+ // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ string create_time = 106;
+
+ // [Output only] The current status of this cluster.
+ Status status = 107;
+
+ // [Output only] Additional information about the current status of this
+ // cluster, if available.
+ string status_message = 108;
+
+ // [Output only] The size of the address space on each node for hosting
+ // containers. This is provisioned from within the `container_ipv4_cidr`
+ // range.
+ int32 node_ipv4_cidr_size = 109;
+
+ // [Output only] The IP address range of the Kubernetes services in
+ // this cluster, in
+ // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
+ // notation (e.g. `1.2.3.4/29`). Service addresses are
+ // typically put in the last `/16` from the container CIDR.
+ string services_ipv4_cidr = 110;
+
+ // [Output only] The resource URLs of [instance
+ // groups](/compute/docs/instance-groups/) associated with this
+ // cluster.
+ repeated string instance_group_urls = 111;
+
+ // [Output only] The number of nodes currently in the cluster.
+ int32 current_node_count = 112;
+
+ // [Output only] The time the cluster will be automatically
+ // deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ string expire_time = 113;
+}
+
+// ClusterUpdate describes an update to the cluster. Exactly one update can
+// be applied to a cluster with each request, so at most one field can be
+// provided.
+message ClusterUpdate {
+ // The Kubernetes version to change the nodes to (typically an
+ // upgrade). Use `-` to upgrade to the latest version supported by
+ // the server.
+ string desired_node_version = 4;
+
+ // The monitoring service the cluster should use to write metrics.
+ // Currently available options:
+ //
+ // * "monitoring.googleapis.com" - the Google Cloud Monitoring service
+ // * "none" - no metrics will be exported from the cluster
+ string desired_monitoring_service = 5;
+
+ // Configurations for the various addons available to run in the cluster.
+ AddonsConfig desired_addons_config = 6;
+
+  // The node pool to be upgraded. This field is mandatory if
+  // "desired_node_version", "desired_image_type" or
+  // "desired_node_pool_autoscaling" is specified and there is more than one
+  // node pool on the cluster.
+ string desired_node_pool_id = 7;
+
+ // The desired image type for the node pool.
+  // NOTE: Set the "desired_node_pool_id" field as well.
+ string desired_image_type = 8;
+
+ // Autoscaler configuration for the node pool specified in
+ // desired_node_pool_id. If there is only one pool in the
+ // cluster and desired_node_pool_id is not provided then
+ // the change applies to that single node pool.
+ NodePoolAutoscaling desired_node_pool_autoscaling = 9;
+
+ // The desired list of Google Compute Engine
+ // [locations](/compute/docs/zones#available) in which the cluster's nodes
+ // should be located. Changing the locations a cluster is in will result
+ // in nodes being either created or removed from the cluster, depending on
+ // whether locations are being added or removed.
+ //
+ // This list must always include the cluster's primary zone.
+ repeated string desired_locations = 10;
+
+ // The Kubernetes version to change the master to. The only valid value is the
+ // latest supported version. Use "-" to have the server automatically select
+ // the latest version.
+ string desired_master_version = 100;
+}
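+
+// Illustrative example (values are hypothetical): a ClusterUpdate that
+// upgrades the cluster's nodes to the latest supported version, in protobuf
+// text format. If the cluster has more than one node pool,
+// `desired_node_pool_id` must also be set to select the pool to upgrade.
+//
+//   desired_node_version: "-"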
+
+// This operation resource represents operations that may have happened or are
+// happening on the cluster. All fields are output only.
+message Operation {
+ // Current status of the operation.
+ enum Status {
+ // Not set.
+ STATUS_UNSPECIFIED = 0;
+
+ // The operation has been created.
+ PENDING = 1;
+
+ // The operation is currently running.
+ RUNNING = 2;
+
+ // The operation is done, either cancelled or completed.
+ DONE = 3;
+
+ // The operation is aborting.
+ ABORTING = 4;
+ }
+
+ // Operation type.
+ enum Type {
+ // Not set.
+ TYPE_UNSPECIFIED = 0;
+
+ // Cluster create.
+ CREATE_CLUSTER = 1;
+
+ // Cluster delete.
+ DELETE_CLUSTER = 2;
+
+ // A master upgrade.
+ UPGRADE_MASTER = 3;
+
+ // A node upgrade.
+ UPGRADE_NODES = 4;
+
+ // Cluster repair.
+ REPAIR_CLUSTER = 5;
+
+ // Cluster update.
+ UPDATE_CLUSTER = 6;
+
+ // Node pool create.
+ CREATE_NODE_POOL = 7;
+
+ // Node pool delete.
+ DELETE_NODE_POOL = 8;
+
+ // Set node pool management.
+ SET_NODE_POOL_MANAGEMENT = 9;
+ }
+
+ // The server-assigned ID for the operation.
+ string name = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the operation
+ // is taking place.
+ string zone = 2;
+
+ // The operation type.
+ Type operation_type = 3;
+
+ // The current status of the operation.
+ Status status = 4;
+
+ // Detailed operation progress, if available.
+ string detail = 8;
+
+ // If an error has occurred, a textual description of the error.
+ string status_message = 5;
+
+ // Server-defined URL for the resource.
+ string self_link = 6;
+
+ // Server-defined URL for the target of the operation.
+ string target_link = 7;
+}
+
+// CreateClusterRequest creates a cluster.
+message CreateClusterRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // A [cluster
+ // resource](/container-engine/reference/rest/v1/projects.zones.clusters)
+ Cluster cluster = 3;
+}
+
+// GetClusterRequest gets the settings of a cluster.
+message GetClusterRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster to retrieve.
+ string cluster_id = 3;
+}
+
+// UpdateClusterRequest updates the settings of a cluster.
+message UpdateClusterRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster to upgrade.
+ string cluster_id = 3;
+
+ // A description of the update.
+ ClusterUpdate update = 4;
+}
+
+// DeleteClusterRequest deletes a cluster.
+message DeleteClusterRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster to delete.
+ string cluster_id = 3;
+}
+
+// ListClustersRequest lists clusters.
+message ListClustersRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides, or "-" for all zones.
+ string zone = 2;
+}
+
+// ListClustersResponse is the result of ListClustersRequest.
+message ListClustersResponse {
+  // A list of clusters in the project in the specified zone, or
+  // across all zones.
+ repeated Cluster clusters = 1;
+
+ // If any zones are listed here, the list of clusters returned
+ // may be missing those zones.
+ repeated string missing_zones = 2;
+}
+
+// GetOperationRequest gets a single operation.
+message GetOperationRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The server-assigned `name` of the operation.
+ string operation_id = 3;
+}
+
+// ListOperationsRequest lists operations.
+message ListOperationsRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine [zone](/compute/docs/zones#available)
+ // to return operations for, or `-` for all zones.
+ string zone = 2;
+}
+
+// CancelOperationRequest cancels a single operation.
+message CancelOperationRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the operation resides.
+ string zone = 2;
+
+ // The server-assigned `name` of the operation.
+ string operation_id = 3;
+}
+
+// ListOperationsResponse is the result of ListOperationsRequest.
+message ListOperationsResponse {
+ // A list of operations in the project in the specified zone.
+ repeated Operation operations = 1;
+
+ // If any zones are listed here, the list of operations returned
+ // may be missing the operations from those zones.
+ repeated string missing_zones = 2;
+}
+
+// Gets the current Container Engine service configuration.
+message GetServerConfigRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine [zone](/compute/docs/zones#available)
+ // to return operations for.
+ string zone = 2;
+}
+
+// Container Engine service configuration.
+message ServerConfig {
+ // Version of Kubernetes the service deploys by default.
+ string default_cluster_version = 1;
+
+ // List of valid node upgrade target versions.
+ repeated string valid_node_versions = 3;
+
+ // Default image type.
+ string default_image_type = 4;
+
+ // List of valid image types.
+ repeated string valid_image_types = 5;
+
+ // List of valid master versions.
+ repeated string valid_master_versions = 6;
+}
+
+// CreateNodePoolRequest creates a node pool for a cluster.
+message CreateNodePoolRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster.
+ string cluster_id = 3;
+
+ // The node pool to create.
+ NodePool node_pool = 4;
+}
+
+// DeleteNodePoolRequest deletes a node pool for a cluster.
+message DeleteNodePoolRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster.
+ string cluster_id = 3;
+
+ // The name of the node pool to delete.
+ string node_pool_id = 4;
+}
+
+// ListNodePoolsRequest lists the node pool(s) for a cluster.
+message ListNodePoolsRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster.
+ string cluster_id = 3;
+}
+
+// GetNodePoolRequest retrieves a node pool for a cluster.
+message GetNodePoolRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster.
+ string cluster_id = 3;
+
+ // The name of the node pool.
+ string node_pool_id = 4;
+}
+
+// NodePool contains the name and configuration for a cluster's node pool.
+// Node pools are a set of nodes (i.e. VMs), with a common configuration and
+// specification, under the control of the cluster master. They may have a set
+// of Kubernetes labels applied to them, which may be used to reference them
+// during pod scheduling. They may also be resized up or down, to accommodate
+// the workload.
+message NodePool {
+ // The current status of the node pool instance.
+ enum Status {
+ // Not set.
+ STATUS_UNSPECIFIED = 0;
+
+ // The PROVISIONING state indicates the node pool is being created.
+ PROVISIONING = 1;
+
+ // The RUNNING state indicates the node pool has been created
+ // and is fully usable.
+ RUNNING = 2;
+
+ // The RUNNING_WITH_ERROR state indicates the node pool has been created
+ // and is partially usable. Some error state has occurred and some
+ // functionality may be impaired. Customer may need to reissue a request
+ // or trigger a new update.
+ RUNNING_WITH_ERROR = 3;
+
+ // The RECONCILING state indicates that some work is actively being done on
+ // the node pool, such as upgrading node software. Details can
+ // be found in the `statusMessage` field.
+ RECONCILING = 4;
+
+ // The STOPPING state indicates the node pool is being deleted.
+ STOPPING = 5;
+
+ // The ERROR state indicates the node pool may be unusable. Details
+ // can be found in the `statusMessage` field.
+ ERROR = 6;
+ }
+
+ // The name of the node pool.
+ string name = 1;
+
+ // The node configuration of the pool.
+ NodeConfig config = 2;
+
+ // The initial node count for the pool. You must ensure that your
+ // Compute Engine <a href="/compute/docs/resource-quotas">resource quota</a>
+ // is sufficient for this number of instances. You must also have available
+ // firewall and routes quota.
+ int32 initial_node_count = 3;
+
+ // [Output only] Server-defined URL for the resource.
+ string self_link = 100;
+
+  // [Output only] The version of Kubernetes running on the nodes in this pool.
+ string version = 101;
+
+ // [Output only] The resource URLs of [instance
+ // groups](/compute/docs/instance-groups/) associated with this
+ // node pool.
+ repeated string instance_group_urls = 102;
+
+ // [Output only] The status of the nodes in this pool instance.
+ Status status = 103;
+
+ // [Output only] Additional information about the current status of this
+ // node pool instance, if available.
+ string status_message = 104;
+
+ // Autoscaler configuration for this NodePool. Autoscaler is enabled
+ // only if a valid configuration is present.
+ NodePoolAutoscaling autoscaling = 4;
+
+ // NodeManagement configuration for this NodePool.
+ NodeManagement management = 5;
+}
+
+// NodeManagement defines the set of node management services turned on for the
+// node pool.
+message NodeManagement {
+ // A flag that specifies whether node auto-upgrade is enabled for the node
+ // pool. If enabled, node auto-upgrade helps keep the nodes in your node pool
+ // up to date with the latest release version of Kubernetes.
+ bool auto_upgrade = 1;
+
+ // A flag that specifies whether the node auto-repair is enabled for the node
+ // pool. If enabled, the nodes in this node pool will be monitored and, if
+ // they fail health checks too many times, an automatic repair action will be
+ // triggered.
+ bool auto_repair = 2;
+
+ // Specifies the Auto Upgrade knobs for the node pool.
+ AutoUpgradeOptions upgrade_options = 10;
+}
+
+// AutoUpgradeOptions defines the set of options for the user to control how
+// the Auto Upgrades will proceed.
+message AutoUpgradeOptions {
+ // [Output only] This field is set when upgrades are about to commence
+ // with the approximate start time for the upgrades, in
+ // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ string auto_upgrade_start_time = 1;
+
+ // [Output only] This field is set when upgrades are about to commence
+ // with the description of the upgrade.
+ string description = 2;
+}
+
+// SetNodePoolManagementRequest sets the node management properties of a node
+// pool.
+message SetNodePoolManagementRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster to update.
+ string cluster_id = 3;
+
+ // The name of the node pool to update.
+ string node_pool_id = 4;
+
+ // NodeManagement configuration for the node pool.
+ NodeManagement management = 5;
+}
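+
+// Illustrative example (project, zone, cluster and pool names are
+// hypothetical): a SetNodePoolManagementRequest that enables auto-upgrade
+// and auto-repair for a node pool, in protobuf text format.
+//
+//   project_id: "my-project"
+//   zone: "us-central1-a"
+//   cluster_id: "my-cluster"
+//   node_pool_id: "default-pool"
+//   management {
+//     auto_upgrade: true
+//     auto_repair: true
+//   }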
+
+// RollbackNodePoolUpgradeRequest rolls back a previously Aborted or Failed
+// NodePool upgrade. This will be a no-op if the last upgrade successfully
+// completed.
+message RollbackNodePoolUpgradeRequest {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ string project_id = 1;
+
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ string zone = 2;
+
+ // The name of the cluster to rollback.
+ string cluster_id = 3;
+
+ // The name of the node pool to rollback.
+ string node_pool_id = 4;
+}
+
+// ListNodePoolsResponse is the result of ListNodePoolsRequest.
+message ListNodePoolsResponse {
+ // A list of node pools for a cluster.
+ repeated NodePool node_pools = 1;
+}
+
+// NodePoolAutoscaling contains information required by cluster autoscaler to
+// adjust the size of the node pool to the current cluster usage.
+message NodePoolAutoscaling {
+  // Whether autoscaling is enabled for this node pool.
+ bool enabled = 1;
+
+ // Minimum number of nodes in the NodePool. Must be >= 1 and <=
+ // max_node_count.
+ int32 min_node_count = 2;
+
+  // Maximum number of nodes in the NodePool. Must be >= min_node_count. There
+  // has to be enough quota to scale up the cluster.
+ int32 max_node_count = 3;
+}
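+
+// Illustrative example (counts are hypothetical): an autoscaling
+// configuration that keeps the pool between 1 and 5 nodes, in protobuf
+// text format.
+//
+//   enabled: true
+//   min_node_count: 1
+//   max_node_count: 5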
diff --git a/third_party/googleapis/google/datastore/datastore.yaml b/third_party/googleapis/google/datastore/datastore.yaml
new file mode 100644
index 0000000000..b135105011
--- /dev/null
+++ b/third_party/googleapis/google/datastore/datastore.yaml
@@ -0,0 +1,22 @@
+type: google.api.Service
+config_version: 1
+name: datastore.googleapis.com
+
+title: Google Cloud Datastore API
+
+documentation:
+ summary: >
+ Accesses the schemaless NoSQL database to provide fully managed, robust,
+ scalable storage for your application.
+
+apis:
+- name: google.datastore.v1.Datastore
+
+# Auth section
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/datastore,
+ https://www.googleapis.com/auth/cloud-platform
+
diff --git a/third_party/googleapis/google/datastore/v1/datastore.proto b/third_party/googleapis/google/datastore/v1/datastore.proto
new file mode 100644
index 0000000000..11acab6b76
--- /dev/null
+++ b/third_party/googleapis/google/datastore/v1/datastore.proto
@@ -0,0 +1,318 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.datastore.v1;
+
+import "google/api/annotations.proto";
+import "google/datastore/v1/entity.proto";
+import "google/datastore/v1/query.proto";
+
+option csharp_namespace = "Google.Cloud.Datastore.V1";
+option go_package = "google.golang.org/genproto/googleapis/datastore/v1;datastore";
+option java_multiple_files = true;
+option java_outer_classname = "DatastoreProto";
+option java_package = "com.google.datastore.v1";
+
+
+// Each RPC normalizes the partition IDs of the keys in its input entities,
+// and always returns entities with keys with normalized partition IDs.
+// This applies to all keys and entities, including those in values, except keys
+// with both an empty path and an empty or unset partition ID. Normalization of
+// input keys sets the project ID (if not already set) to the project ID from
+// the request.
+//
+service Datastore {
+ // Looks up entities by key.
+ rpc Lookup(LookupRequest) returns (LookupResponse) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}:lookup" body: "*" };
+ }
+
+ // Queries for entities.
+ rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}:runQuery" body: "*" };
+ }
+
+ // Begins a new transaction.
+ rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}:beginTransaction" body: "*" };
+ }
+
+ // Commits a transaction, optionally creating, deleting or modifying some
+ // entities.
+ rpc Commit(CommitRequest) returns (CommitResponse) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}:commit" body: "*" };
+ }
+
+ // Rolls back a transaction.
+ rpc Rollback(RollbackRequest) returns (RollbackResponse) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}:rollback" body: "*" };
+ }
+
+ // Allocates IDs for the given keys, which is useful for referencing an entity
+ // before it is inserted.
+ rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}:allocateIds" body: "*" };
+ }
+}
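+
+// Illustrative example (project ID, kind and name are hypothetical): under
+// the HTTP mapping above, a Lookup call is a POST to
+// /v1/projects/my-project:lookup, with the LookupRequest (minus the
+// project_id, which is taken from the URL path) as the JSON body, e.g.:
+//
+//   {
+//     "keys": [
+//       { "path": [ { "kind": "Task", "name": "sample-task" } ] }
+//     ]
+//   }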
+
+// The request for [Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
+message LookupRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // The options for this lookup request.
+ ReadOptions read_options = 1;
+
+ // Keys of entities to look up.
+ repeated Key keys = 3;
+}
+
+// The response for [Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
+message LookupResponse {
+ // Entities found as `ResultType.FULL` entities. The order of results in this
+ // field is undefined and has no relation to the order of the keys in the
+ // input.
+ repeated EntityResult found = 1;
+
+ // Entities not found as `ResultType.KEY_ONLY` entities. The order of results
+ // in this field is undefined and has no relation to the order of the keys
+ // in the input.
+ repeated EntityResult missing = 2;
+
+ // A list of keys that were not looked up due to resource constraints. The
+ // order of results in this field is undefined and has no relation to the
+ // order of the keys in the input.
+ repeated Key deferred = 3;
+}
+
+// The request for [Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
+message RunQueryRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // Entities are partitioned into subsets, identified by a partition ID.
+ // Queries are scoped to a single partition.
+ // This partition ID is normalized with the standard default context
+ // partition ID.
+ PartitionId partition_id = 2;
+
+ // The options for this query.
+ ReadOptions read_options = 1;
+
+ // The type of query.
+ oneof query_type {
+ // The query to run.
+ Query query = 3;
+
+ // The GQL query to run.
+ GqlQuery gql_query = 7;
+ }
+}
+
+// The response for [Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
+message RunQueryResponse {
+ // A batch of query results (always present).
+ QueryResultBatch batch = 1;
+
+ // The parsed form of the `GqlQuery` from the request, if it was set.
+ Query query = 2;
+}
+
+// The request for [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
+message BeginTransactionRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+}
+
+// The response for [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
+message BeginTransactionResponse {
+ // The transaction identifier (always present).
+ bytes transaction = 1;
+}
+
+// The request for [Datastore.Rollback][google.datastore.v1.Datastore.Rollback].
+message RollbackRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // The transaction identifier, returned by a call to
+ // [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
+ bytes transaction = 1;
+}
+
+// The response for [Datastore.Rollback][google.datastore.v1.Datastore.Rollback].
+// (an empty message).
+message RollbackResponse {
+
+}
+
+// The request for [Datastore.Commit][google.datastore.v1.Datastore.Commit].
+message CommitRequest {
+ // The modes available for commits.
+ enum Mode {
+ // Unspecified. This value must not be used.
+ MODE_UNSPECIFIED = 0;
+
+ // Transactional: The mutations are either all applied, or none are applied.
+ // Learn about transactions [here](https://cloud.google.com/datastore/docs/concepts/transactions).
+ TRANSACTIONAL = 1;
+
+    // Non-transactional: The mutations may not be applied atomically (no
+    // all-or-none guarantee).
+ NON_TRANSACTIONAL = 2;
+ }
+
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // The type of commit to perform. Defaults to `TRANSACTIONAL`.
+ Mode mode = 5;
+
+ // Must be set when mode is `TRANSACTIONAL`.
+ oneof transaction_selector {
+ // The identifier of the transaction associated with the commit. A
+ // transaction identifier is returned by a call to
+ // [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
+ bytes transaction = 1;
+ }
+
+ // The mutations to perform.
+ //
+ // When mode is `TRANSACTIONAL`, mutations affecting a single entity are
+ // applied in order. The following sequences of mutations affecting a single
+ // entity are not permitted in a single `Commit` request:
+ //
+ // - `insert` followed by `insert`
+ // - `update` followed by `insert`
+ // - `upsert` followed by `insert`
+ // - `delete` followed by `update`
+ //
+ // When mode is `NON_TRANSACTIONAL`, no two mutations may affect a single
+ // entity.
+ repeated Mutation mutations = 6;
+}
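+
+// Illustrative example (project ID, kind, name and property are
+// hypothetical): a transactional commit that upserts one entity, in protobuf
+// text format. The transaction identifier is the opaque value returned by a
+// prior BeginTransaction call.
+//
+//   project_id: "my-project"
+//   mode: TRANSACTIONAL
+//   transaction: "..."  # opaque bytes from BeginTransactionResponse
+//   mutations {
+//     upsert {
+//       key { path { kind: "Task" name: "sample-task" } }
+//       properties { key: "done" value { boolean_value: false } }
+//     }
+//   }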
+
+// The response for [Datastore.Commit][google.datastore.v1.Datastore.Commit].
+message CommitResponse {
+ // The result of performing the mutations.
+ // The i-th mutation result corresponds to the i-th mutation in the request.
+ repeated MutationResult mutation_results = 3;
+
+ // The number of index entries updated during the commit, or zero if none were
+ // updated.
+ int32 index_updates = 4;
+}
+
+// The request for [Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
+message AllocateIdsRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // A list of keys with incomplete key paths for which to allocate IDs.
+ // No key may be reserved/read-only.
+ repeated Key keys = 1;
+}
+
+// The response for [Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
+message AllocateIdsResponse {
+ // The keys specified in the request (in the same order), each with
+ // its key path completed with a newly allocated ID.
+ repeated Key keys = 1;
+}
+
+// A mutation to apply to an entity.
+message Mutation {
+ // The mutation operation.
+ //
+ // For `insert`, `update`, and `upsert`:
+ // - The entity's key must not be reserved/read-only.
+ // - No property in the entity may have a reserved name,
+ // not even a property in an entity in a value.
+ // - No value in the entity may have meaning 18,
+ // not even a value in an entity in another value.
+ oneof operation {
+ // The entity to insert. The entity must not already exist.
+ // The entity key's final path element may be incomplete.
+ Entity insert = 4;
+
+ // The entity to update. The entity must already exist.
+ // Must have a complete key path.
+ Entity update = 5;
+
+ // The entity to upsert. The entity may or may not already exist.
+ // The entity key's final path element may be incomplete.
+ Entity upsert = 6;
+
+ // The key of the entity to delete. The entity may or may not already exist.
+ // Must have a complete key path and must not be reserved/read-only.
+ Key delete = 7;
+ }
+
+ // When set, the server will detect whether or not this mutation conflicts
+ // with the current version of the entity on the server. Conflicting mutations
+ // are not applied, and are marked as such in MutationResult.
+ oneof conflict_detection_strategy {
+ // The version of the entity that this mutation is being applied to. If this
+ // does not match the current version on the server, the mutation conflicts.
+ int64 base_version = 8;
+ }
+}
+
+// The result of applying a mutation.
+message MutationResult {
+ // The automatically allocated key.
+ // Set only when the mutation allocated a key.
+ Key key = 3;
+
+ // The version of the entity on the server after processing the mutation. If
+ // the mutation doesn't change anything on the server, then the version will
+ // be the version of the current entity or, if no entity is present, a version
+ // that is strictly greater than the version of any previous entity and less
+ // than the version of any possible future entity.
+ int64 version = 4;
+
+ // Whether a conflict was detected for this mutation. Always false when a
+ // conflict detection strategy field is not set in the mutation.
+ bool conflict_detected = 5;
+}
+
+// The options shared by read requests.
+message ReadOptions {
+ // The possible values for read consistencies.
+ enum ReadConsistency {
+ // Unspecified. This value must not be used.
+ READ_CONSISTENCY_UNSPECIFIED = 0;
+
+ // Strong consistency.
+ STRONG = 1;
+
+ // Eventual consistency.
+ EVENTUAL = 2;
+ }
+
+ // If not specified, lookups and ancestor queries default to
+ // `read_consistency`=`STRONG`, global queries default to
+ // `read_consistency`=`EVENTUAL`.
+ oneof consistency_type {
+ // The non-transactional read consistency to use.
+ // Cannot be set to `STRONG` for global queries.
+ ReadConsistency read_consistency = 1;
+
+ // The identifier of the transaction in which to read. A
+ // transaction identifier is returned by a call to
+ // [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
+ bytes transaction = 2;
+ }
+}
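+
+// Illustrative example (an assumption for clarity): ReadOptions for an
+// eventually consistent, non-transactional read, in protobuf text format.
+//
+//   read_consistency: EVENTUAL
+//
+// To read inside a transaction instead, set the `transaction` field to the
+// identifier returned by BeginTransaction.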
diff --git a/third_party/googleapis/google/datastore/v1/datastore_gapic.yaml b/third_party/googleapis/google/datastore/v1/datastore_gapic.yaml
new file mode 100644
index 0000000000..245f03f466
--- /dev/null
+++ b/third_party/googleapis/google/datastore/v1/datastore_gapic.yaml
@@ -0,0 +1,121 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.datastore.spi.v1
+ python:
+ package_name: google.cloud.gapic.datastore.v1
+ go:
+ package_name: cloud.google.com/go/datastore/apiv1
+ csharp:
+ package_name: Google.Cloud.Datastore.V1
+ ruby:
+ package_name: Google::Cloud::Datastore::V1
+ php:
+ package_name: Google\Cloud\Datastore\V1
+ nodejs:
+ package_name: datastore.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.datastore.v1.Datastore
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: Lookup
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ - read_options
+ - keys
+ required_fields:
+ - project_id
+ - keys
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: RunQuery
+ # TODO: Add flattening with oneof when oneofs implemented
+ required_fields:
+ - project_id
+ - partition_id
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: BeginTransaction
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ required_fields:
+ - project_id
+ request_object_method: false
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: Commit
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ - mode
+ - transaction
+ - mutations
+ - parameters:
+ - project_id
+ - mode
+ - mutations
+ required_fields:
+ - project_id
+ - mode
+ - mutations
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: Rollback
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ - transaction
+ required_fields:
+ - project_id
+ - transaction
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: AllocateIds
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ - keys
+ required_fields:
+ - project_id
+ - keys
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
diff --git a/third_party/googleapis/google/datastore/v1/entity.proto b/third_party/googleapis/google/datastore/v1/entity.proto
new file mode 100644
index 0000000000..6df3ef2ab2
--- /dev/null
+++ b/third_party/googleapis/google/datastore/v1/entity.proto
@@ -0,0 +1,203 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.datastore.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/type/latlng.proto";
+
+option csharp_namespace = "Google.Cloud.Datastore.V1";
+option go_package = "google.golang.org/genproto/googleapis/datastore/v1;datastore";
+option java_multiple_files = true;
+option java_outer_classname = "EntityProto";
+option java_package = "com.google.datastore.v1";
+
+
+// A partition ID identifies a grouping of entities. The grouping is always
+// by project and namespace, however the namespace ID may be empty.
+//
+// A partition ID contains several dimensions:
+// project ID and namespace ID.
+//
+// Partition dimensions:
+//
+// - May be `""`.
+// - Must be valid UTF-8 bytes.
+// - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}`
+// If the value of any dimension matches regex `__.*__`, the partition is
+// reserved/read-only.
+// A reserved/read-only partition ID is forbidden in certain documented
+// contexts.
+//
+// Foreign partition IDs (in which the project ID does
+// not match the context project ID) are discouraged.
+// Reads and writes of foreign partition IDs may fail if the project is not
+// in an active state.
+message PartitionId {
+ // The ID of the project to which the entities belong.
+ string project_id = 2;
+
+ // If not empty, the ID of the namespace to which the entities belong.
+ string namespace_id = 4;
+}
+
+// A unique identifier for an entity.
+// If a key's partition ID or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+message Key {
+ // A (kind, ID/name) pair used to construct a key path.
+ //
+ // If either name or ID is set, the element is complete.
+ // If neither is set, the element is incomplete.
+ message PathElement {
+ // The kind of the entity.
+ // A kind matching regex `__.*__` is reserved/read-only.
+ // A kind must not contain more than 1500 bytes when UTF-8 encoded.
+ // Cannot be `""`.
+ string kind = 1;
+
+ // The type of ID.
+ oneof id_type {
+ // The auto-allocated ID of the entity.
+ // Never equal to zero. Values less than zero are discouraged and may not
+ // be supported in the future.
+ int64 id = 2;
+
+ // The name of the entity.
+ // A name matching regex `__.*__` is reserved/read-only.
+ // A name must not be more than 1500 bytes when UTF-8 encoded.
+ // Cannot be `""`.
+ string name = 3;
+ }
+ }
+
+ // Entities are partitioned into subsets, currently identified by a project
+ // ID and namespace ID.
+ // Queries are scoped to a single partition.
+ PartitionId partition_id = 1;
+
+ // The entity path.
+ // An entity path consists of one or more elements composed of a kind and a
+ // string or numerical identifier, which identify entities. The first
+ // element identifies a _root entity_, the second element identifies
+ // a _child_ of the root entity, the third element identifies a child of the
+ // second entity, and so forth. The entities identified by all prefixes of
+ // the path are called the element's _ancestors_.
+ //
+ // An entity path is always fully complete: *all* of the entity's ancestors
+ // are required to be in the path along with the entity identifier itself.
+ // The only exception is that in some documented cases, the identifier in the
+ // last path element (for the entity) itself may be omitted. For example,
+ // the last path element of the key of `Mutation.insert` may have no
+ // identifier.
+ //
+ // A path can never be empty, and a path can have at most 100 elements.
+ repeated PathElement path = 2;
+}
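+
+// Illustrative example (project ID, kinds, name and numeric ID are
+// hypothetical): a key whose entity is a child of a root "TaskList" entity,
+// in protobuf text format.
+//
+//   partition_id { project_id: "my-project" }
+//   path { kind: "TaskList" name: "default" }
+//   path { kind: "Task" id: 5629499534213120 }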
+
+// An array value.
+message ArrayValue {
+ // Values in the array.
+ // The order of this array may not be preserved if it contains a mix of
+ // indexed and unindexed values.
+ repeated Value values = 1;
+}
+
+// A message that can hold any of the supported value types and associated
+// metadata.
+message Value {
+ // Must have a value set.
+ oneof value_type {
+ // A null value.
+ google.protobuf.NullValue null_value = 11;
+
+ // A boolean value.
+ bool boolean_value = 1;
+
+ // An integer value.
+ int64 integer_value = 2;
+
+ // A double value.
+ double double_value = 3;
+
+ // A timestamp value.
+ // When stored in the Datastore, precise only to microseconds;
+ // any additional precision is rounded down.
+ google.protobuf.Timestamp timestamp_value = 10;
+
+ // A key value.
+ Key key_value = 5;
+
+ // A UTF-8 encoded string value.
+  // When `exclude_from_indexes` is false (it is indexed), may have at most
+  // 1500 bytes.
+  // Otherwise, may be set to at least 1,000,000 bytes.
+ string string_value = 17;
+
+ // A blob value.
+ // May have at most 1,000,000 bytes.
+ // When `exclude_from_indexes` is false, may have at most 1500 bytes.
+ // In JSON requests, must be base64-encoded.
+ bytes blob_value = 18;
+
+ // A geo point value representing a point on the surface of Earth.
+ google.type.LatLng geo_point_value = 8;
+
+ // An entity value.
+ //
+ // - May have no key.
+ // - May have a key with an incomplete key path.
+ // - May have a reserved/read-only key.
+ Entity entity_value = 6;
+
+ // An array value.
+ // Cannot contain another array value.
+ // A `Value` instance that sets field `array_value` must not set fields
+ // `meaning` or `exclude_from_indexes`.
+ ArrayValue array_value = 9;
+ }
+
+ // The `meaning` field should only be populated for backwards compatibility.
+ int32 meaning = 14;
+
+ // If the value should be excluded from all indexes including those defined
+ // explicitly.
+ bool exclude_from_indexes = 19;
+}
+
+// A Datastore data object.
+//
+// An entity is limited to 1 megabyte when stored. That _roughly_
+// corresponds to a limit of 1 megabyte for the serialized form of this
+// message.
+message Entity {
+ // The entity's key.
+ //
+ // An entity must have a key, unless otherwise documented (for example,
+ // an entity in `Value.entity_value` may have no key).
+ // An entity's kind is its key path's last element's kind,
+ // or null if it has no key.
+ Key key = 1;
+
+ // The entity's properties.
+ // The map's keys are property names.
+ // A property name matching regex `__.*__` is reserved.
+ // A reserved property name is forbidden in certain documented contexts.
+ // The name must not contain more than 500 characters.
+ // The name cannot be `""`.
+ map<string, Value> properties = 3;
+}
diff --git a/third_party/googleapis/google/datastore/v1/query.proto b/third_party/googleapis/google/datastore/v1/query.proto
new file mode 100644
index 0000000000..483e12565e
--- /dev/null
+++ b/third_party/googleapis/google/datastore/v1/query.proto
@@ -0,0 +1,309 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.datastore.v1;
+
+import "google/api/annotations.proto";
+import "google/datastore/v1/entity.proto";
+import "google/protobuf/wrappers.proto";
+import "google/type/latlng.proto";
+
+option csharp_namespace = "Google.Cloud.Datastore.V1";
+option go_package = "google.golang.org/genproto/googleapis/datastore/v1;datastore";
+option java_multiple_files = true;
+option java_outer_classname = "QueryProto";
+option java_package = "com.google.datastore.v1";
+
+
+// The result of fetching an entity from Datastore.
+message EntityResult {
+ // Specifies what data the 'entity' field contains.
+ // A `ResultType` is either implied (for example, in `LookupResponse.missing`
+ // from `datastore.proto`, it is always `KEY_ONLY`) or specified by context
+ // (for example, in message `QueryResultBatch`, field `entity_result_type`
+ // specifies a `ResultType` for all the values in field `entity_results`).
+ enum ResultType {
+ // Unspecified. This value is never used.
+ RESULT_TYPE_UNSPECIFIED = 0;
+
+ // The key and properties.
+ FULL = 1;
+
+ // A projected subset of properties. The entity may have no key.
+ PROJECTION = 2;
+
+ // Only the key.
+ KEY_ONLY = 3;
+ }
+
+ // The resulting entity.
+ Entity entity = 1;
+
+ // The version of the entity, a strictly positive number that monotonically
+ // increases with changes to the entity.
+ //
+ // This field is set for [`FULL`][google.datastore.v1.EntityResult.ResultType.FULL] entity
+ // results.
+ //
+ // For [missing][google.datastore.v1.LookupResponse.missing] entities in `LookupResponse`, this
+ // is the version of the snapshot that was used to look up the entity, and it
+ // is always set except for eventually consistent reads.
+ int64 version = 4;
+
+ // A cursor that points to the position after the result entity.
+ // Set only when the `EntityResult` is part of a `QueryResultBatch` message.
+ bytes cursor = 3;
+}
+
+// A query for entities.
+message Query {
+ // The projection to return. Defaults to returning all properties.
+ repeated Projection projection = 2;
+
+ // The kinds to query (if empty, returns entities of all kinds).
+ // Currently at most 1 kind may be specified.
+ repeated KindExpression kind = 3;
+
+ // The filter to apply.
+ Filter filter = 4;
+
+ // The order to apply to the query results (if empty, order is unspecified).
+ repeated PropertyOrder order = 5;
+
+ // The properties to make distinct. The query results will contain the first
+ // result for each distinct combination of values for the given properties
+ // (if empty, all results are returned).
+ repeated PropertyReference distinct_on = 6;
+
+ // A starting point for the query results. Query cursors are
+ // returned in query result batches and
+ // [can only be used to continue the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets).
+ bytes start_cursor = 7;
+
+ // An ending point for the query results. Query cursors are
+ // returned in query result batches and
+ // [can only be used to limit the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets).
+ bytes end_cursor = 8;
+
+ // The number of results to skip. Applies before limit, but after all other
+ // constraints. Optional. Must be >= 0 if specified.
+ int32 offset = 10;
+
+ // The maximum number of results to return. Applies after all other
+ // constraints. Optional.
+ // Unspecified is interpreted as no limit.
+ // Must be >= 0 if specified.
+ google.protobuf.Int32Value limit = 12;
+}
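+
+// Illustrative example (kind and property names are hypothetical): a query
+// for the first ten "Task" entities where `done` is false, ordered by
+// `priority` descending, in protobuf text format.
+//
+//   kind { name: "Task" }
+//   filter {
+//     property_filter {
+//       property { name: "done" }
+//       op: EQUAL
+//       value { boolean_value: false }
+//     }
+//   }
+//   order { property { name: "priority" } direction: DESCENDING }
+//   limit { value: 10 }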
+
+// A representation of a kind.
+message KindExpression {
+ // The name of the kind.
+ string name = 1;
+}
+
+// A reference to a property relative to the kind expressions.
+message PropertyReference {
+ // The name of the property.
+ // If name includes "."s, it may be interpreted as a property name path.
+ string name = 2;
+}
+
+// A representation of a property in a projection.
+message Projection {
+ // The property to project.
+ PropertyReference property = 1;
+}
+
+// The desired order for a specific property.
+message PropertyOrder {
+ // The sort direction.
+ enum Direction {
+ // Unspecified. This value must not be used.
+ DIRECTION_UNSPECIFIED = 0;
+
+ // Ascending.
+ ASCENDING = 1;
+
+ // Descending.
+ DESCENDING = 2;
+ }
+
+ // The property to order by.
+ PropertyReference property = 1;
+
+ // The direction to order by. Defaults to `ASCENDING`.
+ Direction direction = 2;
+}
+
+// A holder for any type of filter.
+message Filter {
+ // The type of filter.
+ oneof filter_type {
+ // A composite filter.
+ CompositeFilter composite_filter = 1;
+
+ // A filter on a property.
+ PropertyFilter property_filter = 2;
+ }
+}
+
+// A filter that merges multiple other filters using the given operator.
+message CompositeFilter {
+ // A composite filter operator.
+ enum Operator {
+ // Unspecified. This value must not be used.
+ OPERATOR_UNSPECIFIED = 0;
+
+ // The results are required to satisfy each of the combined filters.
+ AND = 1;
+ }
+
+ // The operator for combining multiple filters.
+ Operator op = 1;
+
+ // The list of filters to combine.
+ // Must contain at least one filter.
+ repeated Filter filters = 2;
+}
+
+// A filter on a specific property.
+message PropertyFilter {
+ // A property filter operator.
+ enum Operator {
+ // Unspecified. This value must not be used.
+ OPERATOR_UNSPECIFIED = 0;
+
+ // Less than.
+ LESS_THAN = 1;
+
+ // Less than or equal.
+ LESS_THAN_OR_EQUAL = 2;
+
+ // Greater than.
+ GREATER_THAN = 3;
+
+ // Greater than or equal.
+ GREATER_THAN_OR_EQUAL = 4;
+
+ // Equal.
+ EQUAL = 5;
+
+ // Has ancestor.
+ HAS_ANCESTOR = 11;
+ }
+
+ // The property to filter by.
+ PropertyReference property = 1;
+
+ // The operator to filter by.
+ Operator op = 2;
+
+ // The value to compare the property to.
+ Value value = 3;
+}
+
+// A [GQL query](https://cloud.google.com/datastore/docs/apis/gql/gql_reference).
+message GqlQuery {
+ // A string of the format described
+ // [here](https://cloud.google.com/datastore/docs/apis/gql/gql_reference).
+ string query_string = 1;
+
+ // When false, the query string must not contain any literals and instead must
+ // bind all values. For example,
+ // `SELECT * FROM Kind WHERE a = 'string literal'` is not allowed, while
+ // `SELECT * FROM Kind WHERE a = @value` is.
+ bool allow_literals = 2;
+
+ // For each non-reserved named binding site in the query string, there must be
+ // a named parameter with that name, but not necessarily the inverse.
+ //
+ // Key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex
+ // `__.*__`, and must not be `""`.
+ map<string, GqlQueryParameter> named_bindings = 5;
+
+ // Numbered binding site @1 references the first numbered parameter,
+ // effectively using 1-based indexing, rather than the usual 0.
+ //
+ // For each binding site numbered i in `query_string`, there must be an i-th
+ // numbered parameter. The inverse must also be true.
+ repeated GqlQueryParameter positional_bindings = 4;
+}
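+
+// Illustrative example (kind and property names are hypothetical): a GqlQuery
+// with one named binding, in protobuf text format.
+//
+//   query_string: "SELECT * FROM Task WHERE done = @done"
+//   named_bindings {
+//     key: "done"
+//     value { value { boolean_value: false } }
+//   }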
+
+// A binding parameter for a GQL query.
+message GqlQueryParameter {
+ // The type of parameter.
+ oneof parameter_type {
+ // A value parameter.
+ Value value = 2;
+
+ // A query cursor. Query cursors are returned in query
+ // result batches.
+ bytes cursor = 3;
+ }
+}
+
+// A batch of results produced by a query.
+message QueryResultBatch {
+ // The possible values for the `more_results` field.
+ enum MoreResultsType {
+ // Unspecified. This value is never used.
+ MORE_RESULTS_TYPE_UNSPECIFIED = 0;
+
+ // There may be additional batches to fetch from this query.
+ NOT_FINISHED = 1;
+
+ // The query is finished, but there may be more results after the limit.
+ MORE_RESULTS_AFTER_LIMIT = 2;
+
+ // The query is finished, but there may be more results after the end
+ // cursor.
+ MORE_RESULTS_AFTER_CURSOR = 4;
+
+ // The query has been exhausted.
+ NO_MORE_RESULTS = 3;
+ }
+
+ // The number of results skipped, typically because of an offset.
+ int32 skipped_results = 6;
+
+ // A cursor that points to the position after the last skipped result.
+ // Will be set when `skipped_results` != 0.
+ bytes skipped_cursor = 3;
+
+ // The result type for every entity in `entity_results`.
+ EntityResult.ResultType entity_result_type = 1;
+
+ // The results for this batch.
+ repeated EntityResult entity_results = 2;
+
+ // A cursor that points to the position after the last result in the batch.
+ bytes end_cursor = 4;
+
+ // The state of the query after the current batch.
+ MoreResultsType more_results = 5;
+
+ // The version number of the snapshot this batch was returned from.
+ // This applies to the range of results from the query's `start_cursor` (or
+ // the beginning of the query if no cursor was given) to this batch's
+ // `end_cursor` (not the query's `end_cursor`).
+ //
+ // In a single transaction, subsequent query result batches for the same query
+ // can have a greater snapshot version number. Each batch's snapshot version
+ // is valid for all preceding batches.
+ // The value will be zero for eventually consistent queries.
+ int64 snapshot_version = 7;
+}
diff --git a/third_party/googleapis/google/datastore/v1beta3/datastore.proto b/third_party/googleapis/google/datastore/v1beta3/datastore.proto
new file mode 100644
index 0000000000..577d96de64
--- /dev/null
+++ b/third_party/googleapis/google/datastore/v1beta3/datastore.proto
@@ -0,0 +1,318 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.datastore.v1beta3;
+
+import "google/api/annotations.proto";
+import "google/datastore/v1beta3/entity.proto";
+import "google/datastore/v1beta3/query.proto";
+
+option csharp_namespace = "Google.Cloud.Datastore.V1Beta3";
+option go_package = "google.golang.org/genproto/googleapis/datastore/v1beta3;datastore";
+option java_multiple_files = true;
+option java_outer_classname = "DatastoreProto";
+option java_package = "com.google.datastore.v1beta3";
+
+
+// Each RPC normalizes the partition IDs of the keys in its input entities,
+// and always returns entities with keys with normalized partition IDs.
+// This applies to all keys and entities, including those in values, except keys
+// with both an empty path and an empty or unset partition ID. Normalization of
+// input keys sets the project ID (if not already set) to the project ID from
+// the request.
+//
+service Datastore {
+ // Looks up entities by key.
+ rpc Lookup(LookupRequest) returns (LookupResponse) {
+ option (google.api.http) = { post: "/v1beta3/projects/{project_id}:lookup" body: "*" };
+ }
+
+ // Queries for entities.
+ rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
+ option (google.api.http) = { post: "/v1beta3/projects/{project_id}:runQuery" body: "*" };
+ }
+
+ // Begins a new transaction.
+ rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
+ option (google.api.http) = { post: "/v1beta3/projects/{project_id}:beginTransaction" body: "*" };
+ }
+
+ // Commits a transaction, optionally creating, deleting or modifying some
+ // entities.
+ rpc Commit(CommitRequest) returns (CommitResponse) {
+ option (google.api.http) = { post: "/v1beta3/projects/{project_id}:commit" body: "*" };
+ }
+
+ // Rolls back a transaction.
+ rpc Rollback(RollbackRequest) returns (RollbackResponse) {
+ option (google.api.http) = { post: "/v1beta3/projects/{project_id}:rollback" body: "*" };
+ }
+
+ // Allocates IDs for the given keys, which is useful for referencing an entity
+ // before it is inserted.
+ rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
+ option (google.api.http) = { post: "/v1beta3/projects/{project_id}:allocateIds" body: "*" };
+ }
+}
+
+// The request for [Datastore.Lookup][google.datastore.v1beta3.Datastore.Lookup].
+message LookupRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // The options for this lookup request.
+ ReadOptions read_options = 1;
+
+ // Keys of entities to look up.
+ repeated Key keys = 3;
+}
+
+// The response for [Datastore.Lookup][google.datastore.v1beta3.Datastore.Lookup].
+message LookupResponse {
+ // Entities found as `ResultType.FULL` entities. The order of results in this
+ // field is undefined and has no relation to the order of the keys in the
+ // input.
+ repeated EntityResult found = 1;
+
+ // Entities not found as `ResultType.KEY_ONLY` entities. The order of results
+ // in this field is undefined and has no relation to the order of the keys
+ // in the input.
+ repeated EntityResult missing = 2;
+
+ // A list of keys that were not looked up due to resource constraints. The
+ // order of results in this field is undefined and has no relation to the
+ // order of the keys in the input.
+ repeated Key deferred = 3;
+}
+
+// The request for [Datastore.RunQuery][google.datastore.v1beta3.Datastore.RunQuery].
+message RunQueryRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // Entities are partitioned into subsets, identified by a partition ID.
+ // Queries are scoped to a single partition.
+ // This partition ID is normalized with the standard default context
+ // partition ID.
+ PartitionId partition_id = 2;
+
+ // The options for this query.
+ ReadOptions read_options = 1;
+
+ // The type of query.
+ oneof query_type {
+ // The query to run.
+ Query query = 3;
+
+ // The GQL query to run.
+ GqlQuery gql_query = 7;
+ }
+}
+
+// The response for [Datastore.RunQuery][google.datastore.v1beta3.Datastore.RunQuery].
+message RunQueryResponse {
+ // A batch of query results (always present).
+ QueryResultBatch batch = 1;
+
+ // The parsed form of the `GqlQuery` from the request, if it was set.
+ Query query = 2;
+}
+
+// The request for [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction].
+message BeginTransactionRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+}
+
+// The response for [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction].
+message BeginTransactionResponse {
+ // The transaction identifier (always present).
+ bytes transaction = 1;
+}
+
+// The request for [Datastore.Rollback][google.datastore.v1beta3.Datastore.Rollback].
+message RollbackRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // The transaction identifier, returned by a call to
+ // [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction].
+ bytes transaction = 1;
+}
+
+// The response for [Datastore.Rollback][google.datastore.v1beta3.Datastore.Rollback].
+// (an empty message).
+message RollbackResponse {
+
+}
+
+// The request for [Datastore.Commit][google.datastore.v1beta3.Datastore.Commit].
+message CommitRequest {
+ // The modes available for commits.
+ enum Mode {
+ // Unspecified. This value must not be used.
+ MODE_UNSPECIFIED = 0;
+
+ // Transactional: The mutations are either all applied, or none are applied.
+ // Learn about transactions [here](https://cloud.google.com/datastore/docs/concepts/transactions).
+ TRANSACTIONAL = 1;
+
+    // Non-transactional: The mutations may not be applied atomically (no
+    // all-or-none guarantee).
+ NON_TRANSACTIONAL = 2;
+ }
+
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // The type of commit to perform. Defaults to `TRANSACTIONAL`.
+ Mode mode = 5;
+
+ // Must be set when mode is `TRANSACTIONAL`.
+ oneof transaction_selector {
+ // The identifier of the transaction associated with the commit. A
+ // transaction identifier is returned by a call to
+ // [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction].
+ bytes transaction = 1;
+ }
+
+ // The mutations to perform.
+ //
+ // When mode is `TRANSACTIONAL`, mutations affecting a single entity are
+ // applied in order. The following sequences of mutations affecting a single
+ // entity are not permitted in a single `Commit` request:
+ //
+ // - `insert` followed by `insert`
+ // - `update` followed by `insert`
+ // - `upsert` followed by `insert`
+ // - `delete` followed by `update`
+ //
+ // When mode is `NON_TRANSACTIONAL`, no two mutations may affect a single
+ // entity.
+ repeated Mutation mutations = 6;
+}
+
+// The response for [Datastore.Commit][google.datastore.v1beta3.Datastore.Commit].
+message CommitResponse {
+ // The result of performing the mutations.
+ // The i-th mutation result corresponds to the i-th mutation in the request.
+ repeated MutationResult mutation_results = 3;
+
+ // The number of index entries updated during the commit, or zero if none were
+ // updated.
+ int32 index_updates = 4;
+}
+
+// The request for [Datastore.AllocateIds][google.datastore.v1beta3.Datastore.AllocateIds].
+message AllocateIdsRequest {
+ // The ID of the project against which to make the request.
+ string project_id = 8;
+
+ // A list of keys with incomplete key paths for which to allocate IDs.
+ // No key may be reserved/read-only.
+ repeated Key keys = 1;
+}
+
+// The response for [Datastore.AllocateIds][google.datastore.v1beta3.Datastore.AllocateIds].
+message AllocateIdsResponse {
+ // The keys specified in the request (in the same order), each with
+ // its key path completed with a newly allocated ID.
+ repeated Key keys = 1;
+}
+
+// A mutation to apply to an entity.
+message Mutation {
+ // The mutation operation.
+ //
+ // For `insert`, `update`, and `upsert`:
+ // - The entity's key must not be reserved/read-only.
+ // - No property in the entity may have a reserved name,
+ // not even a property in an entity in a value.
+ // - No value in the entity may have meaning 18,
+ // not even a value in an entity in another value.
+ oneof operation {
+ // The entity to insert. The entity must not already exist.
+ // The entity key's final path element may be incomplete.
+ Entity insert = 4;
+
+ // The entity to update. The entity must already exist.
+ // Must have a complete key path.
+ Entity update = 5;
+
+ // The entity to upsert. The entity may or may not already exist.
+ // The entity key's final path element may be incomplete.
+ Entity upsert = 6;
+
+ // The key of the entity to delete. The entity may or may not already exist.
+ // Must have a complete key path and must not be reserved/read-only.
+ Key delete = 7;
+ }
+
+ // When set, the server will detect whether or not this mutation conflicts
+ // with the current version of the entity on the server. Conflicting mutations
+ // are not applied, and are marked as such in MutationResult.
+ oneof conflict_detection_strategy {
+ // The version of the entity that this mutation is being applied to. If this
+ // does not match the current version on the server, the mutation conflicts.
+ int64 base_version = 8;
+ }
+}
+
+// The result of applying a mutation.
+message MutationResult {
+ // The automatically allocated key.
+ // Set only when the mutation allocated a key.
+ Key key = 3;
+
+ // The version of the entity on the server after processing the mutation. If
+ // the mutation doesn't change anything on the server, then the version will
+ // be the version of the current entity or, if no entity is present, a version
+ // that is strictly greater than the version of any previous entity and less
+ // than the version of any possible future entity.
+ int64 version = 4;
+
+ // Whether a conflict was detected for this mutation. Always false when a
+ // conflict detection strategy field is not set in the mutation.
+ bool conflict_detected = 5;
+}
+
+// The options shared by read requests.
+message ReadOptions {
+ // The possible values for read consistencies.
+ enum ReadConsistency {
+ // Unspecified. This value must not be used.
+ READ_CONSISTENCY_UNSPECIFIED = 0;
+
+ // Strong consistency.
+ STRONG = 1;
+
+ // Eventual consistency.
+ EVENTUAL = 2;
+ }
+
+  // If not specified, lookups and ancestor queries default to
+  // `read_consistency`=`STRONG`, while global queries default to
+  // `read_consistency`=`EVENTUAL`.
+ oneof consistency_type {
+ // The non-transactional read consistency to use.
+ // Cannot be set to `STRONG` for global queries.
+ ReadConsistency read_consistency = 1;
+
+ // The identifier of the transaction in which to read. A
+ // transaction identifier is returned by a call to
+ // [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction].
+ bytes transaction = 2;
+ }
+}
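
For orientation, the messages above compose as follows: a client begins a transaction, builds `Mutation`s against entities, and commits them with a `CommitRequest`. A minimal sketch in Python, assuming stubs generated from these protos with protoc (the module path `google.datastore.v1beta3.datastore_pb2` is an assumption about the generated layout, not part of this commit):

    # Assumed import path for protoc-generated Python modules.
    from google.datastore.v1beta3 import datastore_pb2

    def transactional_upsert(project_id, transaction, entity):
        # A single upsert; the entity key's final path element may be incomplete.
        mutation = datastore_pb2.Mutation(upsert=entity)
        return datastore_pb2.CommitRequest(
            project_id=project_id,
            mode=datastore_pb2.CommitRequest.TRANSACTIONAL,
            transaction=transaction,  # bytes returned by BeginTransaction
            mutations=[mutation],
        )

The request is then sent via the Datastore service's Commit RPC; the i-th `MutationResult` in the response corresponds to the i-th mutation above.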
diff --git a/third_party/googleapis/google/datastore/v1beta3/entity.proto b/third_party/googleapis/google/datastore/v1beta3/entity.proto
new file mode 100644
index 0000000000..4182ca4302
--- /dev/null
+++ b/third_party/googleapis/google/datastore/v1beta3/entity.proto
@@ -0,0 +1,203 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.datastore.v1beta3;
+
+import "google/api/annotations.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/type/latlng.proto";
+
+option csharp_namespace = "Google.Cloud.Datastore.V1Beta3";
+option go_package = "google.golang.org/genproto/googleapis/datastore/v1beta3;datastore";
+option java_multiple_files = true;
+option java_outer_classname = "EntityProto";
+option java_package = "com.google.datastore.v1beta3";
+
+
+// A partition ID identifies a grouping of entities. The grouping is always
+// by project and namespace; however, the namespace ID may be empty.
+//
+// A partition ID contains several dimensions:
+// project ID and namespace ID.
+//
+// Partition dimensions:
+//
+// - May be `""`.
+// - Must be valid UTF-8 bytes.
+// - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}`.
+//
+// If the value of any dimension matches regex `__.*__`, the partition is
+// reserved/read-only.
+// A reserved/read-only partition ID is forbidden in certain documented
+// contexts.
+//
+// Foreign partition IDs (in which the project ID does
+// not match the context project ID) are discouraged.
+// Reads and writes of foreign partition IDs may fail if the project is not
+// in an active state.
+message PartitionId {
+ // The ID of the project to which the entities belong.
+ string project_id = 2;
+
+ // If not empty, the ID of the namespace to which the entities belong.
+ string namespace_id = 4;
+}
+
+// A unique identifier for an entity.
+// If a key's partition ID or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+message Key {
+ // A (kind, ID/name) pair used to construct a key path.
+ //
+ // If either name or ID is set, the element is complete.
+ // If neither is set, the element is incomplete.
+ message PathElement {
+ // The kind of the entity.
+ // A kind matching regex `__.*__` is reserved/read-only.
+ // A kind must not contain more than 1500 bytes when UTF-8 encoded.
+ // Cannot be `""`.
+ string kind = 1;
+
+ // The type of ID.
+ oneof id_type {
+ // The auto-allocated ID of the entity.
+ // Never equal to zero. Values less than zero are discouraged and may not
+ // be supported in the future.
+ int64 id = 2;
+
+ // The name of the entity.
+ // A name matching regex `__.*__` is reserved/read-only.
+ // A name must not be more than 1500 bytes when UTF-8 encoded.
+ // Cannot be `""`.
+ string name = 3;
+ }
+ }
+
+ // Entities are partitioned into subsets, currently identified by a project
+ // ID and namespace ID.
+ // Queries are scoped to a single partition.
+ PartitionId partition_id = 1;
+
+ // The entity path.
+ // An entity path consists of one or more elements composed of a kind and a
+ // string or numerical identifier, which identify entities. The first
+ // element identifies a _root entity_, the second element identifies
+ // a _child_ of the root entity, the third element identifies a child of the
+ // second entity, and so forth. The entities identified by all prefixes of
+ // the path are called the element's _ancestors_.
+ //
+ // An entity path is always fully complete: *all* of the entity's ancestors
+ // are required to be in the path along with the entity identifier itself.
+ // The only exception is that in some documented cases, the identifier in the
+ // last path element (for the entity) itself may be omitted. For example,
+ // the last path element of the key of `Mutation.insert` may have no
+ // identifier.
+ //
+ // A path can never be empty, and a path can have at most 100 elements.
+ repeated PathElement path = 2;
+}
+
+// An array value.
+message ArrayValue {
+ // Values in the array.
+ // The order of this array may not be preserved if it contains a mix of
+ // indexed and unindexed values.
+ repeated Value values = 1;
+}
+
+// A message that can hold any of the supported value types and associated
+// metadata.
+message Value {
+ // Must have a value set.
+ oneof value_type {
+ // A null value.
+ google.protobuf.NullValue null_value = 11;
+
+ // A boolean value.
+ bool boolean_value = 1;
+
+ // An integer value.
+ int64 integer_value = 2;
+
+ // A double value.
+ double double_value = 3;
+
+ // A timestamp value.
+ // When stored in the Datastore, precise only to microseconds;
+ // any additional precision is rounded down.
+ google.protobuf.Timestamp timestamp_value = 10;
+
+ // A key value.
+ Key key_value = 5;
+
+ // A UTF-8 encoded string value.
+    // When `exclude_from_indexes` is false (it is indexed), may have at most
+    // 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
+ string string_value = 17;
+
+ // A blob value.
+ // May have at most 1,000,000 bytes.
+ // When `exclude_from_indexes` is false, may have at most 1500 bytes.
+ // In JSON requests, must be base64-encoded.
+ bytes blob_value = 18;
+
+ // A geo point value representing a point on the surface of Earth.
+ google.type.LatLng geo_point_value = 8;
+
+ // An entity value.
+ //
+ // - May have no key.
+ // - May have a key with an incomplete key path.
+ // - May have a reserved/read-only key.
+ Entity entity_value = 6;
+
+ // An array value.
+ // Cannot contain another array value.
+ // A `Value` instance that sets field `array_value` must not set fields
+ // `meaning` or `exclude_from_indexes`.
+ ArrayValue array_value = 9;
+ }
+
+ // The `meaning` field should only be populated for backwards compatibility.
+ int32 meaning = 14;
+
+ // If the value should be excluded from all indexes including those defined
+ // explicitly.
+ bool exclude_from_indexes = 19;
+}
+
+// A Datastore data object.
+//
+// An entity is limited to 1 megabyte when stored. That _roughly_
+// corresponds to a limit of 1 megabyte for the serialized form of this
+// message.
+message Entity {
+ // The entity's key.
+ //
+ // An entity must have a key, unless otherwise documented (for example,
+ // an entity in `Value.entity_value` may have no key).
+ // An entity's kind is its key path's last element's kind,
+ // or null if it has no key.
+ Key key = 1;
+
+ // The entity's properties.
+ // The map's keys are property names.
+ // A property name matching regex `__.*__` is reserved.
+ // A reserved property name is forbidden in certain documented contexts.
+ // The name must not contain more than 500 characters.
+ // The name cannot be `""`.
+ map<string, Value> properties = 3;
+}
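
As a usage illustration for `PartitionId`, `Key`, and `Entity`, here is a small sketch, again assuming protoc-generated Python modules; the project ID, kind, and property names ("example-project", "Task", "done", "description") are placeholders:

    from google.datastore.v1beta3 import entity_pb2

    key = entity_pb2.Key(
        partition_id=entity_pb2.PartitionId(project_id="example-project"),
        path=[entity_pb2.Key.PathElement(kind="Task", name="sample-task")],
    )

    task = entity_pb2.Entity(key=key)
    # Map entries with message values are created on first access.
    task.properties["done"].boolean_value = False
    task.properties["description"].string_value = "Write the docs"
    # Indexed strings are capped at 1500 bytes; exclude long text from indexes.
    task.properties["description"].exclude_from_indexes = True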
diff --git a/third_party/googleapis/google/datastore/v1beta3/query.proto b/third_party/googleapis/google/datastore/v1beta3/query.proto
new file mode 100644
index 0000000000..61af6ad276
--- /dev/null
+++ b/third_party/googleapis/google/datastore/v1beta3/query.proto
@@ -0,0 +1,309 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.datastore.v1beta3;
+
+import "google/api/annotations.proto";
+import "google/datastore/v1beta3/entity.proto";
+import "google/protobuf/wrappers.proto";
+import "google/type/latlng.proto";
+
+option csharp_namespace = "Google.Cloud.Datastore.V1Beta3";
+option go_package = "google.golang.org/genproto/googleapis/datastore/v1beta3;datastore";
+option java_multiple_files = true;
+option java_outer_classname = "QueryProto";
+option java_package = "com.google.datastore.v1beta3";
+
+
+// The result of fetching an entity from Datastore.
+message EntityResult {
+ // Specifies what data the 'entity' field contains.
+ // A `ResultType` is either implied (for example, in `LookupResponse.missing`
+ // from `datastore.proto`, it is always `KEY_ONLY`) or specified by context
+ // (for example, in message `QueryResultBatch`, field `entity_result_type`
+ // specifies a `ResultType` for all the values in field `entity_results`).
+ enum ResultType {
+ // Unspecified. This value is never used.
+ RESULT_TYPE_UNSPECIFIED = 0;
+
+ // The key and properties.
+ FULL = 1;
+
+ // A projected subset of properties. The entity may have no key.
+ PROJECTION = 2;
+
+ // Only the key.
+ KEY_ONLY = 3;
+ }
+
+ // The resulting entity.
+ Entity entity = 1;
+
+ // The version of the entity, a strictly positive number that monotonically
+ // increases with changes to the entity.
+ //
+ // This field is set for [`FULL`][google.datastore.v1beta3.EntityResult.ResultType.FULL] entity
+ // results.
+ //
+ // For [missing][google.datastore.v1beta3.LookupResponse.missing] entities in `LookupResponse`, this
+ // is the version of the snapshot that was used to look up the entity, and it
+ // is always set except for eventually consistent reads.
+ int64 version = 4;
+
+ // A cursor that points to the position after the result entity.
+ // Set only when the `EntityResult` is part of a `QueryResultBatch` message.
+ bytes cursor = 3;
+}
+
+// A query for entities.
+message Query {
+ // The projection to return. Defaults to returning all properties.
+ repeated Projection projection = 2;
+
+ // The kinds to query (if empty, returns entities of all kinds).
+ // Currently at most 1 kind may be specified.
+ repeated KindExpression kind = 3;
+
+ // The filter to apply.
+ Filter filter = 4;
+
+ // The order to apply to the query results (if empty, order is unspecified).
+ repeated PropertyOrder order = 5;
+
+ // The properties to make distinct. The query results will contain the first
+ // result for each distinct combination of values for the given properties
+ // (if empty, all results are returned).
+ repeated PropertyReference distinct_on = 6;
+
+ // A starting point for the query results. Query cursors are
+ // returned in query result batches and
+ // [can only be used to continue the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets).
+ bytes start_cursor = 7;
+
+ // An ending point for the query results. Query cursors are
+ // returned in query result batches and
+ // [can only be used to limit the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets).
+ bytes end_cursor = 8;
+
+ // The number of results to skip. Applies before limit, but after all other
+ // constraints. Optional. Must be >= 0 if specified.
+ int32 offset = 10;
+
+ // The maximum number of results to return. Applies after all other
+ // constraints. Optional.
+ // Unspecified is interpreted as no limit.
+ // Must be >= 0 if specified.
+ google.protobuf.Int32Value limit = 12;
+}
+
+// A representation of a kind.
+message KindExpression {
+ // The name of the kind.
+ string name = 1;
+}
+
+// A reference to a property relative to the kind expressions.
+message PropertyReference {
+ // The name of the property.
+ // If name includes "."s, it may be interpreted as a property name path.
+ string name = 2;
+}
+
+// A representation of a property in a projection.
+message Projection {
+ // The property to project.
+ PropertyReference property = 1;
+}
+
+// The desired order for a specific property.
+message PropertyOrder {
+ // The sort direction.
+ enum Direction {
+ // Unspecified. This value must not be used.
+ DIRECTION_UNSPECIFIED = 0;
+
+ // Ascending.
+ ASCENDING = 1;
+
+ // Descending.
+ DESCENDING = 2;
+ }
+
+ // The property to order by.
+ PropertyReference property = 1;
+
+ // The direction to order by. Defaults to `ASCENDING`.
+ Direction direction = 2;
+}
+
+// A holder for any type of filter.
+message Filter {
+ // The type of filter.
+ oneof filter_type {
+ // A composite filter.
+ CompositeFilter composite_filter = 1;
+
+ // A filter on a property.
+ PropertyFilter property_filter = 2;
+ }
+}
+
+// A filter that merges multiple other filters using the given operator.
+message CompositeFilter {
+ // A composite filter operator.
+ enum Operator {
+ // Unspecified. This value must not be used.
+ OPERATOR_UNSPECIFIED = 0;
+
+ // The results are required to satisfy each of the combined filters.
+ AND = 1;
+ }
+
+ // The operator for combining multiple filters.
+ Operator op = 1;
+
+ // The list of filters to combine.
+ // Must contain at least one filter.
+ repeated Filter filters = 2;
+}
+
+// A filter on a specific property.
+message PropertyFilter {
+ // A property filter operator.
+ enum Operator {
+ // Unspecified. This value must not be used.
+ OPERATOR_UNSPECIFIED = 0;
+
+ // Less than.
+ LESS_THAN = 1;
+
+ // Less than or equal.
+ LESS_THAN_OR_EQUAL = 2;
+
+ // Greater than.
+ GREATER_THAN = 3;
+
+ // Greater than or equal.
+ GREATER_THAN_OR_EQUAL = 4;
+
+ // Equal.
+ EQUAL = 5;
+
+ // Has ancestor.
+ HAS_ANCESTOR = 11;
+ }
+
+ // The property to filter by.
+ PropertyReference property = 1;
+
+ // The operator to filter by.
+ Operator op = 2;
+
+ // The value to compare the property to.
+ Value value = 3;
+}
+
+// A [GQL query](https://cloud.google.com/datastore/docs/apis/gql/gql_reference).
+message GqlQuery {
+ // A string of the format described
+ // [here](https://cloud.google.com/datastore/docs/apis/gql/gql_reference).
+ string query_string = 1;
+
+ // When false, the query string must not contain any literals and instead must
+ // bind all values. For example,
+ // `SELECT * FROM Kind WHERE a = 'string literal'` is not allowed, while
+ // `SELECT * FROM Kind WHERE a = @value` is.
+ bool allow_literals = 2;
+
+ // For each non-reserved named binding site in the query string, there must be
+ // a named parameter with that name, but not necessarily the inverse.
+ //
+ // Key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex
+ // `__.*__`, and must not be `""`.
+ map<string, GqlQueryParameter> named_bindings = 5;
+
+ // Numbered binding site @1 references the first numbered parameter,
+ // effectively using 1-based indexing, rather than the usual 0.
+ //
+ // For each binding site numbered i in `query_string`, there must be an i-th
+ // numbered parameter. The inverse must also be true.
+ repeated GqlQueryParameter positional_bindings = 4;
+}
+
+// A binding parameter for a GQL query.
+message GqlQueryParameter {
+ // The type of parameter.
+ oneof parameter_type {
+ // A value parameter.
+ Value value = 2;
+
+ // A query cursor. Query cursors are returned in query
+ // result batches.
+ bytes cursor = 3;
+ }
+}
+
+// A batch of results produced by a query.
+message QueryResultBatch {
+ // The possible values for the `more_results` field.
+ enum MoreResultsType {
+ // Unspecified. This value is never used.
+ MORE_RESULTS_TYPE_UNSPECIFIED = 0;
+
+ // There may be additional batches to fetch from this query.
+ NOT_FINISHED = 1;
+
+ // The query is finished, but there may be more results after the limit.
+ MORE_RESULTS_AFTER_LIMIT = 2;
+
+ // The query is finished, but there may be more results after the end
+ // cursor.
+ MORE_RESULTS_AFTER_CURSOR = 4;
+
+ // The query has been exhausted.
+ NO_MORE_RESULTS = 3;
+ }
+
+ // The number of results skipped, typically because of an offset.
+ int32 skipped_results = 6;
+
+ // A cursor that points to the position after the last skipped result.
+ // Will be set when `skipped_results` != 0.
+ bytes skipped_cursor = 3;
+
+ // The result type for every entity in `entity_results`.
+ EntityResult.ResultType entity_result_type = 1;
+
+ // The results for this batch.
+ repeated EntityResult entity_results = 2;
+
+ // A cursor that points to the position after the last result in the batch.
+ bytes end_cursor = 4;
+
+ // The state of the query after the current batch.
+ MoreResultsType more_results = 5;
+
+ // The version number of the snapshot this batch was returned from.
+ // This applies to the range of results from the query's `start_cursor` (or
+ // the beginning of the query if no cursor was given) to this batch's
+ // `end_cursor` (not the query's `end_cursor`).
+ //
+ // In a single transaction, subsequent query result batches for the same query
+ // can have a greater snapshot version number. Each batch's snapshot version
+ // is valid for all preceding batches.
+ // The value will be zero for eventually consistent queries.
+ int64 snapshot_version = 7;
+}
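
To show how `Query`, `Filter`, and `GqlQuery` fit together, a hedged sketch follows (protoc-generated modules assumed; "Task", "done", and "priority" are made-up names):

    from google.datastore.v1beta3 import entity_pb2, query_pb2
    from google.protobuf import wrappers_pb2

    # Structured query: the ten highest-priority open tasks.
    query = query_pb2.Query(
        kind=[query_pb2.KindExpression(name="Task")],
        filter=query_pb2.Filter(property_filter=query_pb2.PropertyFilter(
            property=query_pb2.PropertyReference(name="done"),
            op=query_pb2.PropertyFilter.EQUAL,
            value=entity_pb2.Value(boolean_value=False),
        )),
        order=[query_pb2.PropertyOrder(
            property=query_pb2.PropertyReference(name="priority"),
            direction=query_pb2.PropertyOrder.DESCENDING,
        )],
        limit=wrappers_pb2.Int32Value(value=10),  # wrapper type: unset means no limit
    )

    # Equivalent GQL form, binding a named parameter instead of a literal.
    gql = query_pb2.GqlQuery(
        query_string="SELECT * FROM Task WHERE done = @done",
        named_bindings={
            "done": query_pb2.GqlQueryParameter(
                value=entity_pb2.Value(boolean_value=False)),
        },
    )

Either form goes into `RunQueryRequest.query_type`; paging continues by feeding `QueryResultBatch.end_cursor` back in as the next query's `start_cursor`.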
diff --git a/third_party/googleapis/google/devtools/build/v1/build_events.proto b/third_party/googleapis/google/devtools/build/v1/build_events.proto
new file mode 100644
index 0000000000..e745aee828
--- /dev/null
+++ b/third_party/googleapis/google/devtools/build/v1/build_events.proto
@@ -0,0 +1,202 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.build.v1;
+
+import "google/api/annotations.proto";
+import "google/devtools/build/v1/build_status.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build";
+option java_multiple_files = true;
+option java_outer_classname = "BuildEventProto";
+option java_package = "com.google.devtools.build.v1";
+
+
+// An event representing some state change that occurred in the build. This
+// message does not include fields for uniquely identifying an event.
+message BuildEvent {
+ // Notification that the build system has attempted to run the build tool.
+ message InvocationAttemptStarted {
+ // The number of the invocation attempt, starting at 1 and increasing by 1
+ // for each new attempt. Can be used to determine if there is a later
+ // invocation attempt replacing the current one a client is processing.
+ int64 attempt_number = 1;
+ }
+
+ // Notification that an invocation attempt has finished.
+ message InvocationAttemptFinished {
+ // The status of the build request.
+ // If OK, the build request was run, though this does not mean the
+ // requested build tool succeeded. "exit_code" will be set to the
+ // exit code of the build tool.
+ // If not OK, the build request was not successfully executed.
+ // "exit_code" will not be set.
+ google.rpc.Status status = 1;
+
+ // The exit code of the build tool.
+ google.protobuf.Int32Value exit_code = 2;
+
+ // Final status of the invocation.
+ BuildStatus invocation_status = 3;
+ }
+
+ // Notification that the build request is enqueued. It could happen when
+ // a new build request is inserted into the build queue, or when a
+ // build request is put back into the build queue due to a previous build
+ // failure.
+ message BuildEnqueued {
+
+ }
+
+ // Notification that the build request has finished, and no further
+ // invocations will occur. Note that this applies to the entire Build.
+ // Individual invocations trigger InvocationFinished when they finish.
+ message BuildFinished {
+ // Final status of the build.
+ BuildStatus status = 1;
+ }
+
+ // Textual output written to standard output or standard error.
+ message ConsoleOutput {
+ // The output stream type.
+ ConsoleOutputStream type = 1;
+
+ // The output stream content.
+ oneof output {
+ // Regular UTF-8 output; normal text.
+ string text_output = 2;
+
+ // Used if the output is not UTF-8 text (for example, a binary proto).
+ bytes binary_output = 3;
+ }
+ }
+
+ // Notification of the end of a build event stream published by a build
+ // component other than CONTROLLER (See StreamId.BuildComponents).
+ message BuildComponentStreamFinished {
+    // How the event stream finished.
+ enum FinishType {
+ // Unknown or unspecified; callers should never set this value.
+ FINISH_TYPE_UNSPECIFIED = 0;
+
+ // Set by the event publisher to indicate a build event stream is
+ // finished.
+ FINISHED = 1;
+
+ // Set by the WatchBuild RPC server when the publisher of a build event
+ // stream stops publishing events without publishing a
+ // BuildComponentStreamFinished event whose type equals FINISHED.
+ EXPIRED = 2;
+ }
+
+ // How the event stream finished.
+ FinishType type = 1;
+ }
+
+ // The timestamp of this event.
+ google.protobuf.Timestamp event_time = 1;
+
+ // //////////////////////////////////////////////////////////////////////////
+ // Events that indicate a state change of a build request in the build
+ // queue.
+ oneof event {
+ // An invocation attempt has started.
+ InvocationAttemptStarted invocation_attempt_started = 51;
+
+ // An invocation attempt has finished.
+ InvocationAttemptFinished invocation_attempt_finished = 52;
+
+ // The build is enqueued (just inserted to the build queue or put back
+ // into the build queue due to a previous build failure).
+ BuildEnqueued build_enqueued = 53;
+
+ // The build has finished. Set when the build is terminated.
+ BuildFinished build_finished = 55;
+
+ // An event containing printed text.
+ ConsoleOutput console_output = 56;
+
+ // Indicates the end of a build event stream (with the same StreamId) from
+ // a build component executing the requested build task.
+ // *** This field does not indicate the WatchBuild RPC is finished. ***
+ BuildComponentStreamFinished component_stream_finished = 59;
+
+ // Structured build event generated by Bazel about its execution progress.
+ google.protobuf.Any bazel_event = 60;
+
+ // An event that contains supplemental tool-specific information about
+ // build execution.
+ google.protobuf.Any build_execution_event = 61;
+
+ // An event that contains supplemental tool-specific information about
+ // source fetching.
+ google.protobuf.Any source_fetch_event = 62;
+ }
+}
+
+// Unique identifier for a build event stream.
+message StreamId {
+ // Which build component generates this event stream. Each build component
+ // may generate one event stream.
+ enum BuildComponent {
+ // Unknown or unspecified; callers should never set this value.
+ UNKNOWN_COMPONENT = 0;
+
+ // A component that coordinates builds.
+ CONTROLLER = 1;
+
+ // A component that runs executables needed to complete a build.
+ WORKER = 2;
+
+ // A component that builds something.
+ TOOL = 3;
+
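+    // Deprecated component value.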
+ DEPRECATED = 4;
+ }
+
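+  // The numeric ID of the project to which this stream belongs.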
+ int64 project_number = 5;
+
+ // The id of a Build message.
+ string build_id = 1;
+
+ // The unique invocation ID within this build.
+ // It should be the same as {invocation} (below) during the migration.
+ string invocation_id = 6;
+
+ // The component that emitted this event.
+ BuildComponent component = 3;
+
+ // The unique invocation ID within this build.
+  // It should be the same as {invocation_id} (above) during the migration.
+ string invocation = 4;
+}
+
+// The type of console output stream.
+enum ConsoleOutputStream {
+ // Unspecified or unknown.
+ UNKNOWN = 0;
+
+ // Normal output stream.
+ STDOUT = 1;
+
+ // Error output stream.
+ STDERR = 2;
+}
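
For orientation, a sketch of how a tool-side publisher might construct a `ConsoleOutput` event and its `StreamId` (a protoc-generated `build_events_pb2` module is assumed; the IDs and timestamp are placeholders):

    from google.devtools.build.v1 import build_events_pb2
    from google.protobuf import timestamp_pb2

    stream_id = build_events_pb2.StreamId(
        build_id="b-1234",                        # placeholder build ID
        invocation_id="inv-1",                    # placeholder invocation ID
        component=build_events_pb2.StreamId.TOOL,
    )

    event = build_events_pb2.BuildEvent(
        event_time=timestamp_pb2.Timestamp(seconds=1496150400),
        console_output=build_events_pb2.BuildEvent.ConsoleOutput(
            type=build_events_pb2.STDOUT,         # top-level ConsoleOutputStream enum
            text_output="INFO: Build completed successfully.\n",
        ),
    )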
diff --git a/third_party/googleapis/google/devtools/build/v1/build_status.proto b/third_party/googleapis/google/devtools/build/v1/build_status.proto
new file mode 100644
index 0000000000..3c2cbb7e67
--- /dev/null
+++ b/third_party/googleapis/google/devtools/build/v1/build_status.proto
@@ -0,0 +1,63 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.build.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build";
+option java_multiple_files = true;
+option java_outer_classname = "BuildStatusProto";
+option java_package = "com.google.devtools.build.v1";
+
+
+// Status used for both invocation attempt and overall build completion.
+message BuildStatus {
+ // The end result of the Build.
+ enum Result {
+ // Unspecified or unknown.
+ UNKNOWN_STATUS = 0;
+
+ // Build was successful and tests (if requested) all pass.
+ COMMAND_SUCCEEDED = 1;
+
+ // Build error and/or test failure.
+ COMMAND_FAILED = 2;
+
+ // Unable to obtain a result due to input provided by the user.
+ USER_ERROR = 3;
+
+ // Unable to obtain a result due to a failure within the build system.
+ SYSTEM_ERROR = 4;
+
+ // Build required too many resources, such as build tool RAM.
+ RESOURCE_EXHAUSTED = 5;
+
+ // An invocation attempt time exceeded its deadline.
+ INVOCATION_DEADLINE_EXCEEDED = 6;
+
+    // Build request time exceeded the request_deadline.
+ REQUEST_DEADLINE_EXCEEDED = 8;
+
+ // The build was cancelled by a call to CancelBuild.
+ CANCELLED = 7;
+ }
+
+ // The end result.
+ Result result = 1;
+}
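
A trivial sketch of how a consumer might fold `BuildStatus.Result` into an exit code (a generated `build_status_pb2` module is assumed; the mapping itself is illustrative, not mandated by the proto):

    from google.devtools.build.v1 import build_status_pb2

    def exit_code_for(status):
        # Only command results map cleanly onto 0/1; everything else is lumped together.
        if status.result == build_status_pb2.BuildStatus.COMMAND_SUCCEEDED:
            return 0
        if status.result == build_status_pb2.BuildStatus.COMMAND_FAILED:
            return 1
        return 2  # user/system errors, resource exhaustion, deadlines, cancellation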
diff --git a/third_party/googleapis/google/devtools/build/v1/publish_build_event.proto b/third_party/googleapis/google/devtools/build/v1/publish_build_event.proto
new file mode 100644
index 0000000000..7589602ad9
--- /dev/null
+++ b/third_party/googleapis/google/devtools/build/v1/publish_build_event.proto
@@ -0,0 +1,136 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.build.v1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/devtools/build/v1/build_events.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/empty.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build";
+option java_multiple_files = true;
+option java_outer_classname = "BackendProto";
+option java_package = "com.google.devtools.build.v1";
+
+
+// A service for publishing BuildEvents. BuildEvents are generated by Build
+// Systems to record actions taken during a Build. Events occur in streams,
+// are identified by a StreamId, and ordered by sequence number in a stream.
+//
+// A Build may contain several streams of BuildEvents, depending on the systems
+// that are involved in the Build. Some BuildEvents are used to declare the
+// beginning and end of major portions of a Build; these are called
+// LifecycleEvents, and are used (for example) to indicate the beginning or end
+// of a Build, and the beginning or end of an Invocation attempt (there can be
+// more than 1 Invocation in a Build if, for example, a failure occurs somewhere
+// and it needs to be retried).
+//
+// Other build-tool events represent actions taken by the Build tool, such as
+// target objects produced via compilation, tests run, et cetera. There could be
+// more than one build tool stream for an invocation attempt of a build.
+service PublishBuildEvent {
+ // Publish a build event stating the new state of a build (typically from the
+ // build queue). If the event is a BuildEnqueued event, also register the new
+  // build request ID and its build type with BES.
+ //
+ // The backend will persist the event and deliver it to registered frontend
+ // jobs immediately without batching.
+ //
+ // The commit status of the request is reported by the RPC's util_status()
+  // function. The error code is the canonical error code defined in
+ // //util/task/codes.proto.
+ rpc PublishLifecycleEvent(PublishLifecycleEventRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/lifecycleEvents:publish" body: "*" };
+ }
+
+ // Publish build tool events belonging to the same stream to a backend job
+ // using bidirectional streaming.
+ rpc PublishBuildToolEventStream(stream OrderedBuildEvent) returns (stream PublishBuildToolEventStreamResponse) {
+ option (google.api.http) = { post: "/v1/events:publish" body: "*" };
+ }
+}
+
+// Publishes 'lifecycle events' that update the high-level state of a build:
+// - BuildEnqueued: When a build is scheduled.
+// - InvocationAttemptStarted: When work for a build starts; there can be
+// multiple invocations for a build (e.g. retries).
+// - InvocationAttemptCompleted: When work for a build finishes.
+// - BuildFinished: When a build is finished.
+message PublishLifecycleEventRequest {
+  // The service level of the build request. Backends only use this value when
+ // the BuildEnqueued event is published to determine what level of service
+ // this build should receive.
+ enum ServiceLevel {
+ // Non-interactive builds can tolerate longer event latencies. This is the
+ // default ServiceLevel if callers do not specify one.
+ NONINTERACTIVE = 0;
+
+ // The events of an interactive build should be delivered with low latency.
+ INTERACTIVE = 1;
+ }
+
+ // The interactivity of this build.
+ ServiceLevel service_level = 1;
+
+ // The lifecycle build event. If this is a build tool event, the RPC will fail
+ // with INVALID_REQUEST.
+ OrderedBuildEvent build_event = 2;
+
+ // If the next event for this build or invocation (depending on the event
+ // type) hasn't been published after this duration from when {build_event}
+ // is written to BES, consider this stream expired. If this field is not set,
+  // the BES backend will use its own default value.
+ google.protobuf.Duration stream_timeout = 3;
+
+  // Additional information about a build request. These are defined by the event
+ // publishers, and the Build Event Service does not validate or interpret
+ // them. They are used while notifying internal systems of new builds and
+ // invocations if the OrderedBuildEvent.event type is
+ // BuildEnqueued/InvocationAttemptStarted.
+ repeated string notification_keywords = 4;
+
+ // This field identifies which project (if any) the build is associated with.
+ string project_id = 6;
+}
+
+// States which event has been committed. Any failure to commit will cause
+// RPC errors, hence not recorded by this proto.
+message PublishBuildToolEventStreamResponse {
+ // The stream that contains this event.
+ StreamId stream_id = 1;
+
+ // The sequence number of this event that has been committed.
+ int64 sequence_number = 2;
+}
+
+// Build event with contextual information about the stream it belongs to and
+// its position in that stream.
+message OrderedBuildEvent {
+ // Which build event stream this event belongs to.
+ StreamId stream_id = 1;
+
+ // The position of this event in the stream. The sequence numbers for a build
+ // event stream should be a sequence of consecutive natural numbers starting
+ // from one. (1, 2, 3, ...)
+ int64 sequence_number = 2;
+
+ // The actual event.
+ BuildEvent event = 3;
+}
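
Putting the pieces together, a sketch of the bidirectional `PublishBuildToolEventStream` call, assuming grpcio plus protoc-generated `publish_build_event_pb2` / `publish_build_event_pb2_grpc` modules; channel setup and event construction are left to the caller:

    from google.devtools.build.v1 import (
        publish_build_event_pb2,
        publish_build_event_pb2_grpc,
    )

    def publish_tool_events(channel, stream_id, events):
        """Sends BuildEvents on one stream and prints the server's acks."""
        stub = publish_build_event_pb2_grpc.PublishBuildEventStub(channel)

        def ordered():
            # Sequence numbers must be consecutive natural numbers starting at 1.
            for seq, event in enumerate(events, start=1):
                yield publish_build_event_pb2.OrderedBuildEvent(
                    stream_id=stream_id, sequence_number=seq, event=event)

        # Each response acknowledges one committed event by sequence number.
        for ack in stub.PublishBuildToolEventStream(ordered()):
            print("committed", ack.stream_id.build_id, ack.sequence_number)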
diff --git a/third_party/googleapis/google/devtools/cloudbuild/README.md b/third_party/googleapis/google/devtools/cloudbuild/README.md
new file mode 100644
index 0000000000..a0685ba3a4
--- /dev/null
+++ b/third_party/googleapis/google/devtools/cloudbuild/README.md
@@ -0,0 +1 @@
+The Google Cloud Container Builder API lets you build an application or part of an application from source code stored in Google Cloud Storage or a Google Cloud Source Repository and package it into container images. The container images are then stored in Google Container Registry. You can optionally copy the images to another container registry as required.
\ No newline at end of file
diff --git a/third_party/googleapis/google/devtools/cloudbuild/v1/cloudbuild.proto b/third_party/googleapis/google/devtools/cloudbuild/v1/cloudbuild.proto
new file mode 100644
index 0000000000..c94458b800
--- /dev/null
+++ b/third_party/googleapis/google/devtools/cloudbuild/v1/cloudbuild.proto
@@ -0,0 +1,579 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.cloudbuild.v1;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/devtools/cloudbuild/v1;cloudbuild";
+option java_multiple_files = true;
+option java_package = "com.google.cloudbuild.v1";
+option objc_class_prefix = "GCB";
+
+
+// Manages container image build requests in the cloud.
+//
+// The main concept used by this API is a Build, which describes the location of
+// the source to build, how to build the source into a container image, and what
+// tag to apply to the built image when it is pushed to Google Container
+// Registry.
+//
+// A user can list previously-requested builds or get builds by their ID to
+// determine the status of the build.
+service CloudBuild {
+ // Starts a build with the specified configuration.
+ //
+ // The long-running Operation returned by this method will include the ID of
+ // the build, which can be passed to GetBuild to determine its status (e.g.,
+ // success or failure).
+ rpc CreateBuild(CreateBuildRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/builds" body: "build" };
+ }
+
+ // Returns information about a previously requested build.
+ //
+ // The Build that is returned includes its status (e.g., success or failure,
+ // or in-progress), and timing information.
+ rpc GetBuild(GetBuildRequest) returns (Build) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/builds/{id}" };
+ }
+
+ // Lists previously requested builds.
+ //
+ // Previously requested builds may still be in-progress, or may have finished
+ // successfully or unsuccessfully.
+ rpc ListBuilds(ListBuildsRequest) returns (ListBuildsResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/builds" };
+ }
+
+ // Cancels a requested build in progress.
+ rpc CancelBuild(CancelBuildRequest) returns (Build) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/builds/{id}:cancel" body: "*" };
+ }
+
+ // Creates a new BuildTrigger.
+ //
+ // This API is experimental.
+ rpc CreateBuildTrigger(CreateBuildTriggerRequest) returns (BuildTrigger) {
+ option (google.api.http) = { post: "/v1/projects/{project_id}/triggers" body: "trigger" };
+ }
+
+ // Gets information about a BuildTrigger.
+ //
+ // This API is experimental.
+ rpc GetBuildTrigger(GetBuildTriggerRequest) returns (BuildTrigger) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/triggers/{trigger_id}" };
+ }
+
+  // Lists existing BuildTriggers.
+ //
+ // This API is experimental.
+ rpc ListBuildTriggers(ListBuildTriggersRequest) returns (ListBuildTriggersResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/triggers" };
+ }
+
+  // Deletes a BuildTrigger by its project ID and trigger ID.
+ //
+ // This API is experimental.
+ rpc DeleteBuildTrigger(DeleteBuildTriggerRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/projects/{project_id}/triggers/{trigger_id}" };
+ }
+
+  // Updates a BuildTrigger by its project ID and trigger ID.
+ //
+ // This API is experimental.
+ rpc UpdateBuildTrigger(UpdateBuildTriggerRequest) returns (BuildTrigger) {
+ option (google.api.http) = { patch: "/v1/projects/{project_id}/triggers/{trigger_id}" body: "trigger" };
+ }
+}
+
+// StorageSource describes the location of the source in an archive file in
+// Google Cloud Storage.
+message StorageSource {
+ // Google Cloud Storage bucket containing source (see
+ // [Bucket Name
+ // Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).
+ string bucket = 1;
+
+ // Google Cloud Storage object containing source.
+ //
+ // This object must be a gzipped archive file (.tar.gz) containing source to
+ // build.
+ string object = 2;
+
+ // Google Cloud Storage generation for the object. If the generation is
+ // omitted, the latest generation will be used.
+ int64 generation = 3;
+}
+
+// RepoSource describes the location of the source in a Google Cloud Source
+// Repository.
+message RepoSource {
+ // ID of the project that owns the repo. If omitted, the project ID requesting
+ // the build is assumed.
+ string project_id = 1;
+
+ // Name of the repo. If omitted, the name "default" is assumed.
+ string repo_name = 2;
+
+ // A revision within the source repository must be specified in
+ // one of these ways.
+ oneof revision {
+ // Name of the branch to build.
+ string branch_name = 3;
+
+ // Name of the tag to build.
+ string tag_name = 4;
+
+ // Explicit commit SHA to build.
+ string commit_sha = 5;
+ }
+}
+
+// Source describes the location of the source in a supported storage
+// service.
+message Source {
+ // Describes location of source.
+ oneof source {
+    // If provided, get the source from this location in Google Cloud
+ // Storage.
+ StorageSource storage_source = 2;
+
+ // If provided, get source from this location in a Cloud Repo.
+ RepoSource repo_source = 3;
+ }
+}
+
+// BuiltImage describes an image built by the pipeline.
+message BuiltImage {
+ // Name used to push the container image to Google Container Registry, as
+ // presented to `docker push`.
+ string name = 1;
+
+ // Docker Registry 2.0 digest.
+ string digest = 3;
+}
+
+// BuildStep describes a step to perform in the build pipeline.
+message BuildStep {
+ // The name of the container image that will run this particular build step.
+ //
+ // If the image is already available in the host's Docker daemon's cache, it
+ // will be run directly. If not, the host will attempt to pull the image
+ // first, using the builder service account's credentials if necessary.
+ //
+ // The Docker daemon's cache will already have the latest versions of all of
+ // the officially supported build steps
+ // (https://github.com/GoogleCloudPlatform/cloud-builders). The Docker daemon
+ // will also have cached many of the layers for some popular images, like
+  // "ubuntu" and "debian", but they will be refreshed at the time you attempt to
+ // use them.
+ //
+ // If you built an image in a previous build step, it will be stored in the
+ // host's Docker daemon's cache and is available to use as the name for a
+ // later build step.
+ string name = 1;
+
+ // A list of environment variable definitions to be used when running a step.
+ //
+ // The elements are of the form "KEY=VALUE" for the environment variable "KEY"
+ // being given the value "VALUE".
+ repeated string env = 2;
+
+ // A list of arguments that will be presented to the step when it is started.
+ //
+ // If the image used to run the step's container has an entrypoint, these args
+ // will be used as arguments to that entrypoint. If the image does not define
+ // an entrypoint, the first element in args will be used as the entrypoint,
+ // and the remainder will be used as arguments.
+ repeated string args = 3;
+
+ // Working directory (relative to project source root) to use when running
+ // this operation's container.
+ string dir = 4;
+
+ // Optional unique identifier for this build step, used in wait_for to
+ // reference this build step as a dependency.
+ string id = 5;
+
+ // The ID(s) of the step(s) that this build step depends on.
+ // This build step will not start until all the build steps in wait_for
+ // have completed successfully. If wait_for is empty, this build step will
+ // start when all previous build steps in the Build.Steps list have completed
+ // successfully.
+ repeated string wait_for = 6;
+
+  // Optional entrypoint to be used instead of the build step image's default.
+ // If unset, the image's default will be used.
+ string entrypoint = 7;
+}
+
+// Results describes the artifacts created by the build pipeline.
+message Results {
+ // Images that were built as a part of the build.
+ repeated BuiltImage images = 2;
+
+ // List of build step digests, in order corresponding to build step indices.
+ repeated string build_step_images = 3;
+}
+
+// A build resource in the Container Builder API.
+//
+// At a high level, a Build describes where to find source code, how to build
+// it (for example, the builder image to run on the source), and what tag to
+// apply to the built image when it is pushed to Google Container Registry.
+//
+// Fields can include the following variables which will be expanded when the
+// build is created:
+//
+// - $PROJECT_ID: the project ID of the build.
+// - $BUILD_ID: the autogenerated ID of the build.
+// - $REPO_NAME: the source repository name specified by RepoSource.
+// - $BRANCH_NAME: the branch name specified by RepoSource.
+// - $TAG_NAME: the tag name specified by RepoSource.
+// - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or
+// resolved from the specified branch or tag.
+message Build {
+ // Possible status of a build.
+ enum Status {
+ // Status of the build is unknown.
+ STATUS_UNKNOWN = 0;
+
+ // Build is queued; work has not yet begun.
+ QUEUED = 1;
+
+ // Build is being executed.
+ WORKING = 2;
+
+ // Build finished successfully.
+ SUCCESS = 3;
+
+ // Build failed to complete successfully.
+ FAILURE = 4;
+
+ // Build failed due to an internal cause.
+ INTERNAL_ERROR = 5;
+
+ // Build took longer than was allowed.
+ TIMEOUT = 6;
+
+ // Build was canceled by a user.
+ CANCELLED = 7;
+ }
+
+ // Unique identifier of the build.
+ // @OutputOnly
+ string id = 1;
+
+ // ID of the project.
+ // @OutputOnly.
+ string project_id = 16;
+
+ // Status of the build.
+ // @OutputOnly
+ Status status = 2;
+
+ // Customer-readable message about the current status.
+ // @OutputOnly
+ string status_detail = 24;
+
+ // Describes where to find the source files to build.
+ Source source = 3;
+
+ // Describes the operations to be performed on the workspace.
+ repeated BuildStep steps = 11;
+
+ // Results of the build.
+ // @OutputOnly
+ Results results = 10;
+
+ // Time at which the request to create the build was received.
+ // @OutputOnly
+ google.protobuf.Timestamp create_time = 6;
+
+ // Time at which execution of the build was started.
+ // @OutputOnly
+ google.protobuf.Timestamp start_time = 7;
+
+ // Time at which execution of the build was finished.
+ //
+ // The difference between finish_time and start_time is the duration of the
+ // build's execution.
+ // @OutputOnly
+ google.protobuf.Timestamp finish_time = 8;
+
+ // Amount of time that this build should be allowed to run, to second
+ // granularity. If this amount of time elapses, work on the build will cease
+ // and the build status will be TIMEOUT.
+ //
+ // Default time is ten minutes.
+ google.protobuf.Duration timeout = 12;
+
+ // A list of images to be pushed upon the successful completion of all build
+ // steps.
+ //
+ // The images will be pushed using the builder service account's credentials.
+ //
+ // The digests of the pushed images will be stored in the Build resource's
+ // results field.
+ //
+ // If any of the images fail to be pushed, the build is marked FAILURE.
+ repeated string images = 13;
+
+ // Google Cloud Storage bucket where logs should be written (see
+ // [Bucket Name
+ // Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).
+  // Log file names will be of the format `${logs_bucket}/log-${build_id}.txt`.
+ string logs_bucket = 19;
+
+ // A permanent fixed identifier for source.
+ // @OutputOnly
+ SourceProvenance source_provenance = 21;
+
+ // The ID of the BuildTrigger that triggered this build, if it was
+ // triggered automatically.
+ // @OutputOnly
+ string build_trigger_id = 22;
+
+ // Special options for this build.
+ BuildOptions options = 23;
+
+ // URL to logs for this build in Google Cloud Logging.
+ // @OutputOnly
+ string log_url = 25;
+
+ // Substitutions data for Build resource.
+ map<string, string> substitutions = 29;
+}
+
+// Metadata for build operations.
+message BuildOperationMetadata {
+ // The build that the operation is tracking.
+ Build build = 1;
+}
+
+// Provenance of the source. Ways to find the original source, or verify that
+// some source was used for this build.
+message SourceProvenance {
+  // A copy of the build's source.storage_source, if it exists, with any
+ // generations resolved.
+ StorageSource resolved_storage_source = 3;
+
+  // A copy of the build's source.repo_source, if it exists, with any
+ // revisions resolved.
+ RepoSource resolved_repo_source = 6;
+
+ // Hash(es) of the build source, which can be used to verify that the original
+ // source integrity was maintained in the build. Note that FileHashes will
+ // only be populated if BuildOptions has requested a SourceProvenanceHash.
+ //
+ // The keys to this map are file paths used as build source and the values
+ // contain the hash values for those files.
+ //
+ // If the build source came in a single package such as a gzipped tarfile
+ // (.tar.gz), the FileHash will be for the single path to that file.
+ // @OutputOnly
+ map<string, FileHashes> file_hashes = 4;
+}
+
+// Container message for hashes of byte content of files, used in
+// SourceProvenance messages to verify integrity of source input to the build.
+message FileHashes {
+ // Collection of file hashes.
+ repeated Hash file_hash = 1;
+}
+
+// Container message for hash values.
+message Hash {
+ // Specifies the hash algorithm, if any.
+ enum HashType {
+ // No hash requested.
+ NONE = 0;
+
+ // Use a sha256 hash.
+ SHA256 = 1;
+ }
+
+ // The type of hash that was performed.
+ HashType type = 1;
+
+ // The hash value.
+ bytes value = 2;
+}
+
+// Request to create a new build.
+message CreateBuildRequest {
+ // ID of the project.
+ string project_id = 1;
+
+ // Build resource to create.
+ Build build = 2;
+}
+
+// Request to get a build.
+message GetBuildRequest {
+ // ID of the project.
+ string project_id = 1;
+
+ // ID of the build.
+ string id = 2;
+}
+
+// Request to list builds.
+message ListBuildsRequest {
+ // ID of the project.
+ string project_id = 1;
+
+ // Number of results to return in the list.
+ int32 page_size = 2;
+
+ // Token to provide to skip to a particular spot in the list.
+ string page_token = 3;
+
+ // The raw filter text to constrain the results.
+ string filter = 8;
+}
+
+// Response including listed builds.
+message ListBuildsResponse {
+ // Builds will be sorted by create_time, descending.
+ repeated Build builds = 1;
+
+ // Token to receive the next page of results.
+ string next_page_token = 2;
+}
+
+// Request to cancel an ongoing build.
+message CancelBuildRequest {
+ // ID of the project.
+ string project_id = 1;
+
+ // ID of the build.
+ string id = 2;
+}
+
+// Configuration for an automated build in response to source repository
+// changes.
+message BuildTrigger {
+ // Unique identifier of the trigger.
+ //
+ // @OutputOnly
+ string id = 1;
+
+ // Human-readable description of this trigger.
+ string description = 10;
+
+ // Template describing the types of source changes to trigger a build.
+ //
+ // Branch and tag names in trigger templates are interpreted as regular
+ // expressions. Any branch or tag change that matches that regular expression
+ // will trigger a build.
+ RepoSource trigger_template = 7;
+
+ // Template describing the Build request to make when the trigger is matched.
+ oneof build_template {
+ // Contents of the build template.
+ Build build = 4;
+
+    // Path, from the source root, to a file whose contents are used for the
+ // template.
+ string filename = 8;
+ }
+
+ // Time when the trigger was created.
+ //
+ // @OutputOnly
+ google.protobuf.Timestamp create_time = 5;
+
+ // If true, the trigger will never result in a build.
+ bool disabled = 9;
+
+ // Substitutions data for Build resource.
+ map<string, string> substitutions = 11;
+}
+
+// Request to create a new BuildTrigger.
+message CreateBuildTriggerRequest {
+ // ID of the project for which to configure automatic builds.
+ string project_id = 1;
+
+ // BuildTrigger to create.
+ BuildTrigger trigger = 2;
+}
+
+// Returns the BuildTrigger with the specified ID.
+message GetBuildTriggerRequest {
+ // ID of the project that owns the trigger.
+ string project_id = 1;
+
+ // ID of the BuildTrigger to get.
+ string trigger_id = 2;
+}
+
+// Request to list existing BuildTriggers.
+message ListBuildTriggersRequest {
+ // ID of the project for which to list BuildTriggers.
+ string project_id = 1;
+}
+
+// Response containing existing BuildTriggers.
+message ListBuildTriggersResponse {
+ // BuildTriggers for the project, sorted by create_time descending.
+ repeated BuildTrigger triggers = 1;
+}
+
+// Request to delete a BuildTrigger.
+message DeleteBuildTriggerRequest {
+ // ID of the project that owns the trigger.
+ string project_id = 1;
+
+ // ID of the BuildTrigger to delete.
+ string trigger_id = 2;
+}
+
+// Request to update an existing BuildTrigger.
+message UpdateBuildTriggerRequest {
+ // ID of the project that owns the trigger.
+ string project_id = 1;
+
+ // ID of the BuildTrigger to update.
+ string trigger_id = 2;
+
+ // BuildTrigger to update.
+ BuildTrigger trigger = 3;
+}
+
+// Optional arguments to enable specific features of builds.
+message BuildOptions {
+ // Specifies the manner in which the build should be verified, if at all.
+ enum VerifyOption {
+ // Not a verifiable build. (default)
+ NOT_VERIFIED = 0;
+
+ // Verified build.
+ VERIFIED = 1;
+ }
+
+ // Requested hash for SourceProvenance.
+ repeated Hash.HashType source_provenance_hash = 1;
+
+ // Requested verifiability options.
+ VerifyOption requested_verify_option = 2;
+}
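
The ListBuildsRequest / ListBuildsResponse pair above follows the usual page-token contract: send an empty `page_token` first, then echo `next_page_token` until it comes back empty. A minimal Java paging sketch under two assumptions — the generated message classes live in `com.google.cloudbuild.v1` (this file's java_package option, not visible in this hunk), and the actual ListBuilds RPC is passed in as a function so no particular client stub is implied:

    import com.google.cloudbuild.v1.Build;
    import com.google.cloudbuild.v1.ListBuildsRequest;
    import com.google.cloudbuild.v1.ListBuildsResponse;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    final class ListBuildsPagingSketch {
      /** Collects all builds by following next_page_token until it comes back empty. */
      static List<Build> listAll(
          String projectId, Function<ListBuildsRequest, ListBuildsResponse> listBuilds) {
        List<Build> all = new ArrayList<>();
        String pageToken = "";
        do {
          ListBuildsResponse response = listBuilds.apply(
              ListBuildsRequest.newBuilder()
                  .setProjectId(projectId)
                  .setPageSize(100)          // page_size: results per page
                  .setPageToken(pageToken)   // page_token: empty on the first call
                  .build());
          all.addAll(response.getBuildsList());
          pageToken = response.getNextPageToken();
        } while (!pageToken.isEmpty());
        return all;
      }
    }
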
diff --git a/third_party/googleapis/google/devtools/clouddebugger/clouddebugger.yaml b/third_party/googleapis/google/devtools/clouddebugger/clouddebugger.yaml
new file mode 100644
index 0000000000..2277b93322
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouddebugger/clouddebugger.yaml
@@ -0,0 +1,36 @@
+type: google.api.Service
+
+config_version: 2
+
+name: clouddebugger.googleapis.com
+title: Stackdriver Debugger API
+
+documentation:
+ summary: |
+ Examines the call stack and variables of a running application
+ without stopping or slowing it down.
+
+apis:
+- name: google.devtools.clouddebugger.v2.Debugger2
+- name: google.devtools.clouddebugger.v2.Controller2
+
+authentication:
+ rules:
+ - selector: |-
+ google.devtools.clouddebugger.v2.Debugger2.SetBreakpoint,
+ google.devtools.clouddebugger.v2.Debugger2.GetBreakpoint,
+ google.devtools.clouddebugger.v2.Debugger2.DeleteBreakpoint,
+ google.devtools.clouddebugger.v2.Debugger2.ListBreakpoints,
+ google.devtools.clouddebugger.v2.Debugger2.ListDebuggees
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud_debugger,
+ https://www.googleapis.com/auth/cloud-platform
+ - selector: |-
+ google.devtools.clouddebugger.v2.Controller2.RegisterDebuggee,
+ google.devtools.clouddebugger.v2.Controller2.ListActiveBreakpoints,
+ google.devtools.clouddebugger.v2.Controller2.UpdateActiveBreakpoint
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud_debugger,
+ https://www.googleapis.com/auth/cloud-platform
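
The two `canonical_scopes` listed above are what a caller would request when authenticating against this API. A short sketch using google-auth-library-java (an assumption; the library is not part of this import) to build Application Default Credentials with exactly those scopes:

    import com.google.auth.oauth2.GoogleCredentials;
    import java.io.IOException;
    import java.util.Arrays;

    final class DebuggerScopesSketch {
      static GoogleCredentials debuggerCredentials() throws IOException {
        // Application Default Credentials, scoped per the canonical_scopes above.
        return GoogleCredentials.getApplicationDefault()
            .createScoped(Arrays.asList(
                "https://www.googleapis.com/auth/cloud_debugger",
                "https://www.googleapis.com/auth/cloud-platform"));
      }
    }
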
diff --git a/third_party/googleapis/google/devtools/clouddebugger/v2/clouddebugger_gapic.yaml b/third_party/googleapis/google/devtools/clouddebugger/v2/clouddebugger_gapic.yaml
new file mode 100644
index 0000000000..da26544cac
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouddebugger/v2/clouddebugger_gapic.yaml
@@ -0,0 +1,168 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.debugger.spi.v2
+ python:
+ package_name: google.cloud.gapic.debugger.v2
+ go:
+ package_name: cloud.google.com/go/debugger/apiv2
+ domain_layer_location: cloud.google.com/go/cmd/go-cloud-debug-agent
+ csharp:
+ package_name: Google.Devtools.Clouddebugger.V2
+ ruby:
+ package_name: Google::Cloud::Debugger::V2
+ php:
+ package_name: Google\Cloud\Debugger\V2
+ nodejs:
+ package_name: debugger.v2
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.devtools.clouddebugger.v2.Debugger2
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: SetBreakpoint
+ required_fields:
+ - debuggee_id
+ - breakpoint
+ - client_version
+ flattening:
+ groups:
+ - parameters:
+ - debuggee_id
+ - breakpoint
+ - client_version
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: GetBreakpoint
+ required_fields:
+ - debuggee_id
+ - breakpoint_id
+ - client_version
+ flattening:
+ groups:
+ - parameters:
+ - debuggee_id
+ - breakpoint_id
+ - client_version
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: DeleteBreakpoint
+ required_fields:
+ - debuggee_id
+ - breakpoint_id
+ - client_version
+ flattening:
+ groups:
+ - parameters:
+ - debuggee_id
+ - breakpoint_id
+ - client_version
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: ListBreakpoints
+ required_fields:
+ - debuggee_id
+ - client_version
+ flattening:
+ groups:
+ - parameters:
+ - debuggee_id
+ - client_version
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: ListDebuggees
+ required_fields:
+ - project
+ - client_version
+ flattening:
+ groups:
+ - parameters:
+ - project
+ - client_version
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+- name: google.devtools.clouddebugger.v2.Controller2
+ collections: []
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: RegisterDebuggee
+ required_fields:
+ - debuggee
+ flattening:
+ groups:
+ - parameters:
+ - debuggee
+ request_object_method: false
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: ListActiveBreakpoints
+ required_fields:
+ - debuggee_id
+ flattening:
+ groups:
+ - parameters:
+ - debuggee_id
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: UpdateActiveBreakpoint
+ required_fields:
+ - debuggee_id
+ - breakpoint
+ flattening:
+ groups:
+ - parameters:
+ - debuggee_id
+ - breakpoint
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
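
The `retry_codes_def` / `retry_params_def` sections above are consumed by the GAPIC generator; the sketch below only illustrates the semantics of the `default` profile (initial delay 100 ms, multiplier 1.3, delay capped at 60 s, overall deadline 600 s, retrying the `idempotent` code set). Per-attempt RPC timeouts are omitted, and this is not the generated client:

    import io.grpc.Status;
    import io.grpc.StatusRuntimeException;
    import java.util.EnumSet;
    import java.util.Set;
    import java.util.function.Supplier;

    final class RetrySketch {
      static <T> T callWithRetry(Supplier<T> rpc) throws InterruptedException {
        // "idempotent" retry codes from the config above.
        Set<Status.Code> retryable =
            EnumSet.of(Status.Code.UNAVAILABLE, Status.Code.DEADLINE_EXCEEDED);
        long delayMillis = 100;                                // initial_retry_delay_millis
        long deadline = System.currentTimeMillis() + 600_000;  // total_timeout_millis
        while (true) {
          try {
            return rpc.get();
          } catch (StatusRuntimeException e) {
            if (!retryable.contains(e.getStatus().getCode())
                || System.currentTimeMillis() >= deadline) {
              throw e;  // non-retryable code or overall deadline exceeded
            }
            Thread.sleep(delayMillis);
            // retry_delay_multiplier, capped at max_retry_delay_millis.
            delayMillis = Math.min((long) (delayMillis * 1.3), 60_000);
          }
        }
      }
    }
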
diff --git a/third_party/googleapis/google/devtools/clouddebugger/v2/controller.proto b/third_party/googleapis/google/devtools/clouddebugger/v2/controller.proto
new file mode 100644
index 0000000000..f72cf08d14
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouddebugger/v2/controller.proto
@@ -0,0 +1,158 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.clouddebugger.v2;
+
+import "google/api/annotations.proto";
+import "google/devtools/clouddebugger/v2/data.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2;clouddebugger";
+option java_multiple_files = true;
+option java_outer_classname = "ControllerProto";
+option java_package = "com.google.devtools.clouddebugger.v2";
+
+
+// The Controller service provides the API for orchestrating a collection of
+// debugger agents to perform debugging tasks. These agents are each attached
+// to a process of an application which may include one or more replicas.
+//
+// The debugger agents register with the Controller to identify the application
+// being debugged, the Debuggee. All agents that register with the same data
+// represent the same Debuggee and are assigned the same `debuggee_id`.
+//
+// The debugger agents call the Controller to retrieve the list of active
+// Breakpoints. Agents with the same `debuggee_id` get the same breakpoints
+// list. An agent that can fulfill the breakpoint request updates the
+// Controller with the breakpoint result. The controller selects the first
+// result received and discards the rest of the results.
+// Agents that poll again for active breakpoints will no longer have
+// the completed breakpoint in the list and should remove that breakpoint from
+// their attached process.
+//
+// The Controller service does not provide a way to retrieve the results of
+// a completed breakpoint. This functionality is available using the Debugger
+// service.
+service Controller2 {
+ // Registers the debuggee with the controller service.
+ //
+ // All agents attached to the same application should call this method with
+ // the same request content to get back the same stable `debuggee_id`. Agents
+ // should call this method again whenever `google.rpc.Code.NOT_FOUND` is
+ // returned from any controller method.
+ //
+ // This allows the controller service to disable the agent or recover from any
+ // data loss. If the debuggee is disabled by the server, the response will
+ // have `is_disabled` set to `true`.
+ rpc RegisterDebuggee(RegisterDebuggeeRequest) returns (RegisterDebuggeeResponse) {
+ option (google.api.http) = { post: "/v2/controller/debuggees/register" body: "*" };
+ }
+
+ // Returns the list of all active breakpoints for the debuggee.
+ //
+ // The breakpoint specification (location, condition, and expression
+ // fields) is semantically immutable, although the field values may
+ // change. For example, an agent may update the location line number
+ // to reflect the actual line where the breakpoint was set, but this
+ // doesn't change the breakpoint semantics.
+ //
+ // This means that an agent does not need to check if a breakpoint has changed
+ // when it encounters the same breakpoint on a successive call.
+ // Moreover, an agent should remember the breakpoints that are completed
+ // until the controller removes them from the active list to avoid
+ // setting those breakpoints again.
+ rpc ListActiveBreakpoints(ListActiveBreakpointsRequest) returns (ListActiveBreakpointsResponse) {
+ option (google.api.http) = { get: "/v2/controller/debuggees/{debuggee_id}/breakpoints" };
+ }
+
+ // Updates the breakpoint state or mutable fields.
+ // The entire Breakpoint message must be sent back to the controller
+ // service.
+ //
+ // Updates to active breakpoint fields are only allowed if the new value
+ // does not change the breakpoint specification. Updates to the `location`,
+ // `condition` and `expression` fields should not alter the breakpoint
+ // semantics. These may only make changes such as canonicalizing a value
+ // or snapping the location to the correct line of code.
+ rpc UpdateActiveBreakpoint(UpdateActiveBreakpointRequest) returns (UpdateActiveBreakpointResponse) {
+ option (google.api.http) = { put: "/v2/controller/debuggees/{debuggee_id}/breakpoints/{breakpoint.id}" body: "*" };
+ }
+}
+
+// Request to register a debuggee.
+message RegisterDebuggeeRequest {
+ // Debuggee information to register.
+ // The fields `project`, `uniquifier`, `description` and `agent_version`
+ // of the debuggee must be set.
+ Debuggee debuggee = 1;
+}
+
+// Response for registering a debuggee.
+message RegisterDebuggeeResponse {
+ // Debuggee resource.
+ // The field `id` is guaranteed to be set (in addition to the echoed fields).
+ Debuggee debuggee = 1;
+}
+
+// Request to list active breakpoints.
+message ListActiveBreakpointsRequest {
+ // Identifies the debuggee.
+ string debuggee_id = 1;
+
+ // A wait token that, if specified, blocks the method call until the list
+ // of active breakpoints has changed, or a server selected timeout has
+ // expired. The value should be set from the last returned response.
+ string wait_token = 2;
+
+ // If set to `true`, returns `google.rpc.Code.OK` status and sets the
+ // `wait_expired` response field to `true` when the server-selected timeout
+ // has expired (recommended).
+ //
+ // If set to `false`, returns `google.rpc.Code.ABORTED` status when the
+ // server-selected timeout has expired (deprecated).
+ bool success_on_timeout = 3;
+}
+
+// Response for listing active breakpoints.
+message ListActiveBreakpointsResponse {
+ // List of all active breakpoints.
+ // The fields `id` and `location` are guaranteed to be set on each breakpoint.
+ repeated Breakpoint breakpoints = 1;
+
+ // A wait token that can be used in the next method call to block until
+ // the list of breakpoints changes.
+ string next_wait_token = 2;
+
+ // The `wait_expired` field is set to true by the server when the
+ // request times out and the field `success_on_timeout` is set to true.
+ bool wait_expired = 3;
+}
+
+// Request to update an active breakpoint.
+message UpdateActiveBreakpointRequest {
+ // Identifies the debuggee being debugged.
+ string debuggee_id = 1;
+
+ // Updated breakpoint information.
+ // The field 'id' must be set.
+ Breakpoint breakpoint = 2;
+}
+
+// Response for updating an active breakpoint.
+// The message is defined to allow future extensions.
+message UpdateActiveBreakpointResponse {
+
+}
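
A minimal agent-side sketch of the Controller2 workflow documented above: register the debuggee once, then long-poll ListActiveBreakpoints with the returned wait token. It assumes the standard protoc/grpc-java codegen for this file (message classes in `com.google.devtools.clouddebugger.v2`, a `Controller2Grpc` stub) and leaves out authentication, `NOT_FOUND` re-registration, and reporting results via UpdateActiveBreakpoint:

    import com.google.devtools.clouddebugger.v2.Controller2Grpc;
    import com.google.devtools.clouddebugger.v2.Debuggee;
    import com.google.devtools.clouddebugger.v2.ListActiveBreakpointsRequest;
    import com.google.devtools.clouddebugger.v2.ListActiveBreakpointsResponse;
    import com.google.devtools.clouddebugger.v2.RegisterDebuggeeRequest;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    final class AgentSketch {
      public static void main(String[] args) {
        ManagedChannel channel =  // credentials omitted; see clouddebugger.yaml for the scopes
            ManagedChannelBuilder.forTarget("clouddebugger.googleapis.com").build();
        Controller2Grpc.Controller2BlockingStub controller =
            Controller2Grpc.newBlockingStub(channel);

        // project, uniquifier, description and agent_version must be set.
        Debuggee debuggee = Debuggee.newBuilder()
            .setProject("123456789")                          // hypothetical project number
            .setUniquifier("my-app-build-42")
            .setDescription("my-app on instance-1")
            .setAgentVersion("example.com/sketch-agent/v0.1")
            .build();
        String debuggeeId = controller.registerDebuggee(
                RegisterDebuggeeRequest.newBuilder().setDebuggee(debuggee).build())
            .getDebuggee().getId();

        String waitToken = "";
        while (true) {  // a real agent would also re-register on NOT_FOUND
          ListActiveBreakpointsResponse response = controller.listActiveBreakpoints(
              ListActiveBreakpointsRequest.newBuilder()
                  .setDebuggeeId(debuggeeId)
                  .setWaitToken(waitToken)    // blocks until the list changes or times out
                  .setSuccessOnTimeout(true)  // recommended: OK + wait_expired, not ABORTED
                  .build());
          waitToken = response.getNextWaitToken();
          if (!response.getWaitExpired()) {
            response.getBreakpointsList().forEach(bp -> System.out.println("active: " + bp.getId()));
          }
        }
      }
    }
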
diff --git a/third_party/googleapis/google/devtools/clouddebugger/v2/data.proto b/third_party/googleapis/google/devtools/clouddebugger/v2/data.proto
new file mode 100644
index 0000000000..8ebe7d1909
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouddebugger/v2/data.proto
@@ -0,0 +1,448 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.clouddebugger.v2;
+
+import "google/api/annotations.proto";
+import "google/devtools/source/v1/source_context.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2;clouddebugger";
+option java_multiple_files = true;
+option java_outer_classname = "DataProto";
+option java_package = "com.google.devtools.clouddebugger.v2";
+
+
+// Represents a message with parameters.
+message FormatMessage {
+ // Format template for the message. The `format` uses placeholders `$0`,
+ // `$1`, etc. to reference parameters. `$$` can be used to denote the `$`
+ // character.
+ //
+ // Examples:
+ //
+ // * `Failed to load '$0' which helps debug $1 the first time it
+ // is loaded. Again, $0 is very important.`
+ // * `Please pay $$10 to use $0 instead of $1.`
+ string format = 1;
+
+ // Optional parameters to be embedded into the message.
+ repeated string parameters = 2;
+}
+
+// Represents a contextual status message.
+// The message can indicate an error or informational status, and refer to
+// specific parts of the containing object.
+// For example, the `Breakpoint.status` field can indicate an error referring
+// to the `BREAKPOINT_SOURCE_LOCATION` with the message `Location not found`.
+message StatusMessage {
+ // Enumerates references to which the message applies.
+ enum Reference {
+ // Status doesn't refer to any particular input.
+ UNSPECIFIED = 0;
+
+ // Status applies to the breakpoint and is related to its location.
+ BREAKPOINT_SOURCE_LOCATION = 3;
+
+ // Status applies to the breakpoint and is related to its condition.
+ BREAKPOINT_CONDITION = 4;
+
+ // Status applies to the breakpoint and is related to its expressions.
+ BREAKPOINT_EXPRESSION = 7;
+
+ // Status applies to the breakpoint and is related to its age.
+ BREAKPOINT_AGE = 8;
+
+ // Status applies to the entire variable.
+ VARIABLE_NAME = 5;
+
+ // Status applies to variable value (variable name is valid).
+ VARIABLE_VALUE = 6;
+ }
+
+ // Distinguishes errors from informational messages.
+ bool is_error = 1;
+
+ // Reference to which the message applies.
+ Reference refers_to = 2;
+
+ // Status message text.
+ FormatMessage description = 3;
+}
+
+// Represents a location in the source code.
+message SourceLocation {
+ // Path to the source file within the source context of the target binary.
+ string path = 1;
+
+ // Line inside the file. The first line in the file has the value `1`.
+ int32 line = 2;
+}
+
+// Represents a variable or an argument possibly of a compound object type.
+// Note how the following variables are represented:
+//
+// 1) A simple variable:
+//
+// int x = 5
+//
+// { name: "x", value: "5", type: "int" } // Captured variable
+//
+// 2) A compound object:
+//
+// struct T {
+// int m1;
+// int m2;
+// };
+// T x = { 3, 7 };
+//
+// { // Captured variable
+// name: "x",
+// type: "T",
+// members { name: "m1", value: "3", type: "int" },
+// members { name: "m2", value: "7", type: "int" }
+// }
+//
+// 3) A pointer where the pointee was captured:
+//
+// T x = { 3, 7 };
+// T* p = &x;
+//
+// { // Captured variable
+// name: "p",
+// type: "T*",
+// value: "0x00500500",
+// members { name: "m1", value: "3", type: "int" },
+// members { name: "m2", value: "7", type: "int" }
+// }
+//
+// 4) A pointer where the pointee was not captured:
+//
+// T* p = new T;
+//
+// { // Captured variable
+// name: "p",
+// type: "T*",
+// value: "0x00400400"
+// status { is_error: true, description { format: "unavailable" } }
+// }
+//
+// The status should describe the reason for the missing value,
+// such as `<optimized out>`, `<inaccessible>`, `<pointers limit reached>`.
+//
+// Note that a null pointer should not have members.
+//
+// 5) An unnamed value:
+//
+// int* p = new int(7);
+//
+// { // Captured variable
+// name: "p",
+// value: "0x00500500",
+// type: "int*",
+// members { value: "7", type: "int" } }
+//
+// 6) An unnamed pointer where the pointee was not captured:
+//
+// int* p = new int(7);
+// int** pp = &p;
+//
+// { // Captured variable
+// name: "pp",
+// value: "0x00500500",
+// type: "int**",
+// members {
+// value: "0x00400400",
+// type: "int*"
+// status {
+// is_error: true,
+// description { format: "unavailable" }
+// }
+// }
+// }
+//
+// To optimize computation, memory and network traffic, variables that
+// repeat in the output multiple times can be stored once in a shared
+// variable table and be referenced using the `var_table_index` field. The
+// variables stored in the shared table are nameless and are essentially
+// a partition of the complete variable. To reconstruct the complete
+// variable, merge the referencing variable with the referenced variable.
+//
+// When using the shared variable table, the following variables:
+//
+// T x = { 3, 7 };
+// T* p = &x;
+// T& r = x;
+//
+// { name: "x", var_table_index: 3, type: "T" } // Captured variables
+// { name: "p", value "0x00500500", type="T*", var_table_index: 3 }
+// { name: "r", type="T&", var_table_index: 3 }
+//
+// { // Shared variable table entry #3:
+// members { name: "m1", value: "3", type: "int" },
+// members { name: "m2", value: "7", type: "int" }
+// }
+//
+// Note that the pointer address is stored with the referencing variable
+// and not with the referenced variable. This allows the referenced variable
+// to be shared between pointers and references.
+//
+// The type field is optional. The debugger agent may or may not support it.
+message Variable {
+ // Name of the variable, if any.
+ string name = 1;
+
+ // Simple value of the variable.
+ string value = 2;
+
+ // Variable type (e.g. `MyClass`). If the variable is split with
+ // `var_table_index`, `type` goes next to `value`. The interpretation of
+ // a type is agent specific. It is recommended to include the dynamic type
+ // rather than a static type of an object.
+ string type = 6;
+
+ // Members contained or pointed to by the variable.
+ repeated Variable members = 3;
+
+ // Reference to a variable in the shared variable table. More than
+ // one variable can reference the same variable in the table. The
+ // `var_table_index` field is an index into `variable_table` in Breakpoint.
+ google.protobuf.Int32Value var_table_index = 4;
+
+ // Status associated with the variable. This field will usually stay
+ // unset. A status of a single variable only applies to that variable or
+ // expression. The rest of the breakpoint data still remains valid. Variables
+ // might be reported in error state even when the breakpoint is not in its
+ // final state.
+ //
+ // The message may refer to variable name with `refers_to` set to
+ // `VARIABLE_NAME`. Alternatively `refers_to` will be set to `VARIABLE_VALUE`.
+ // In either case variable value and members will be unset.
+ //
+ // Example of error message applied to name: `Invalid expression syntax`.
+ //
+ // Example of information message applied to value: `Not captured`.
+ //
+ // Examples of error message applied to value:
+ //
+ // * `Malformed string`,
+ // * `Field f not found in class C`
+ // * `Null pointer dereference`
+ StatusMessage status = 5;
+}
+
+// Represents a stack frame context.
+message StackFrame {
+ // Demangled function name at the call site.
+ string function = 1;
+
+ // Source location of the call site.
+ SourceLocation location = 2;
+
+ // Set of arguments passed to this function.
+ // Note that this might not be populated for all stack frames.
+ repeated Variable arguments = 3;
+
+ // Set of local variables at the stack frame location.
+ // Note that this might not be populated for all stack frames.
+ repeated Variable locals = 4;
+}
+
+// Represents the breakpoint specification, status and results.
+message Breakpoint {
+ // Actions that can be taken when a breakpoint hits.
+ // Agents should reject breakpoints with unsupported or unknown action values.
+ enum Action {
+ // Capture stack frame and variables and update the breakpoint.
+ // The data is only captured once. After that the breakpoint is set
+ // in a final state.
+ CAPTURE = 0;
+
+ // Log each breakpoint hit. The breakpoint remains active until
+ // deleted or expired.
+ LOG = 1;
+ }
+
+ // Log severity levels.
+ enum LogLevel {
+ // Information log message.
+ INFO = 0;
+
+ // Warning log message.
+ WARNING = 1;
+
+ // Error log message.
+ ERROR = 2;
+ }
+
+ // Breakpoint identifier, unique in the scope of the debuggee.
+ string id = 1;
+
+ // Action that the agent should perform when the code at the
+ // breakpoint location is hit.
+ Action action = 13;
+
+ // Breakpoint source location.
+ SourceLocation location = 2;
+
+ // Condition that triggers the breakpoint.
+ // The condition is a compound boolean expression composed using expressions
+ // in a programming language at the source location.
+ string condition = 3;
+
+ // List of read-only expressions to evaluate at the breakpoint location.
+ // The expressions are composed using expressions in the programming language
+ // at the source location. If the breakpoint action is `LOG`, the evaluated
+ // expressions are included in log statements.
+ repeated string expressions = 4;
+
+ // Only relevant when action is `LOG`. Defines the message to log when
+ // the breakpoint hits. The message may include parameter placeholders `$0`,
+ // `$1`, etc. These placeholders are replaced with the evaluated value
+ // of the appropriate expression. Expressions not referenced in
+ // `log_message_format` are not logged.
+ //
+ // Example: `Message received, id = $0, count = $1` with
+ // `expressions` = `[ message.id, message.count ]`.
+ string log_message_format = 14;
+
+ // Indicates the severity of the log. Only relevant when action is `LOG`.
+ LogLevel log_level = 15;
+
+ // When true, indicates that this is a final result and the
+ // breakpoint state will not change from here on.
+ bool is_final_state = 5;
+
+ // Time this breakpoint was created by the server in seconds resolution.
+ google.protobuf.Timestamp create_time = 11;
+
+ // Time this breakpoint was finalized as seen by the server in seconds
+ // resolution.
+ google.protobuf.Timestamp final_time = 12;
+
+ // E-mail address of the user who created this breakpoint.
+ string user_email = 16;
+
+ // Breakpoint status.
+ //
+ // The status includes an error flag and a human readable message.
+ // This field is usually unset. The message can be either
+ // informational or an error message. Regardless, clients should always
+ // display the text message back to the user.
+ //
+ // Error status indicates complete failure of the breakpoint.
+ //
+ // Example (non-final state): `Still loading symbols...`
+ //
+ // Examples (final state):
+ //
+ // * `Invalid line number` referring to location
+ // * `Field f not found in class C` referring to condition
+ StatusMessage status = 10;
+
+ // The stack at breakpoint time.
+ repeated StackFrame stack_frames = 7;
+
+ // Values of evaluated expressions at breakpoint time.
+ // The evaluated expressions appear in exactly the same order they
+ // are listed in the `expressions` field.
+ // The `name` field holds the original expression text, the `value` or
+ // `members` field holds the result of the evaluated expression.
+ // If the expression cannot be evaluated, the `status` inside the `Variable`
+ // will indicate an error and contain the error text.
+ repeated Variable evaluated_expressions = 8;
+
+ // The `variable_table` exists to aid with computation, memory and network
+ // traffic optimization. It enables storing a variable once and reference
+ // it from multiple variables, including variables stored in the
+ // `variable_table` itself.
+ // For example, the same `this` object, which may appear at many levels of
+ // the stack, can have all of its data stored once in this table. The
+ // stack frame variables then would hold only a reference to it.
+ //
+ // The variable `var_table_index` field is an index into this repeated field.
+ // The stored objects are nameless and get their name from the referencing
+ // variable. The effective variable is a merge of the referencing variable
+ // and the referenced variable.
+ repeated Variable variable_table = 9;
+
+ // A set of custom breakpoint properties, populated by the agent, to be
+ // displayed to the user.
+ map<string, string> labels = 17;
+}
+
+// Represents the application to debug. The application may include one or more
+// replicated processes executing the same code. Each of these processes has
+// a debugger agent attached to it, carrying out the debugging commands.
+// The agents attached to the same debuggee are identified by using exactly the
+// same field values when registering.
+message Debuggee {
+ // Unique identifier for the debuggee generated by the controller service.
+ string id = 1;
+
+ // Project the debuggee is associated with.
+ // Use the project number when registering a Google Cloud Platform project.
+ string project = 2;
+
+ // Debuggee uniquifier within the project.
+ // Any string that identifies the application within the project can be used.
+ // Including environment and version or build IDs is recommended.
+ string uniquifier = 3;
+
+ // Human readable description of the debuggee.
+ // Including a human-readable project name, environment name and version
+ // information is recommended.
+ string description = 4;
+
+ // If set to `true`, indicates that the debuggee is considered as inactive by
+ // the Controller service.
+ bool is_inactive = 5;
+
+ // Version ID of the agent release. The version ID is structured as
+ // following: `domain/type/vmajor.minor` (for example
+ // `google.com/gcp-java/v1.1`).
+ string agent_version = 6;
+
+ // If set to `true`, indicates that the agent should disable itself and
+ // detach from the debuggee.
+ bool is_disabled = 7;
+
+ // Human readable message to be displayed to the user about this debuggee.
+ // Absence of this field indicates no status. The message can be either
+ // informational or an error status.
+ StatusMessage status = 8;
+
+ // References to the locations and revisions of the source code used in the
+ // deployed application.
+ //
+ // NOTE: This field is deprecated. Consumers should use
+ // `ext_source_contexts` if it is not empty. Debug agents should populate
+ // both this field and `ext_source_contexts`.
+ repeated google.devtools.source.v1.SourceContext source_contexts = 9;
+
+ // References to the locations and revisions of the source code used in the
+ // deployed application.
+ //
+ // Contexts describing a remote repo related to the source code
+ // have a `category` label of `remote_repo`. Source snapshot source
+ // contexts have a `category` of `snapshot`.
+ repeated google.devtools.source.v1.ExtendedSourceContext ext_source_contexts = 13;
+
+ // A set of custom debuggee properties, populated by the agent, to be
+ // displayed to the user.
+ map<string, string> labels = 11;
+}
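
The Variable comments above describe how a referencing variable and its shared `variable_table` entry are merged into one effective variable. A small sketch of that merge, assuming the standard protobuf Java codegen for this file (a full implementation would also resolve indices recursively inside `members`):

    import com.google.devtools.clouddebugger.v2.Breakpoint;
    import com.google.devtools.clouddebugger.v2.Variable;

    final class VariableTableSketch {
      /** Merges a referencing variable with its shared-table entry, if it has one. */
      static Variable resolve(Variable v, Breakpoint bp) {
        if (!v.hasVarTableIndex()) {
          return v;  // not split; nothing to merge
        }
        Variable shared = bp.getVariableTable(v.getVarTableIndex().getValue());
        return v.toBuilder()
            .clearVarTableIndex()
            // The table entry is nameless; it contributes members (and the type if missing).
            .addAllMembers(shared.getMembersList())
            .setType(v.getType().isEmpty() ? shared.getType() : v.getType())
            .build();
      }
    }
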
diff --git a/third_party/googleapis/google/devtools/clouddebugger/v2/debugger.proto b/third_party/googleapis/google/devtools/clouddebugger/v2/debugger.proto
new file mode 100644
index 0000000000..24ee3da4e7
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouddebugger/v2/debugger.proto
@@ -0,0 +1,196 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.clouddebugger.v2;
+
+import "google/api/annotations.proto";
+import "google/devtools/clouddebugger/v2/data.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2;clouddebugger";
+option java_multiple_files = true;
+option java_outer_classname = "DebuggerProto";
+option java_package = "com.google.devtools.clouddebugger.v2";
+
+
+// The Debugger service provides the API that allows users to collect run-time
+// information from a running application, without stopping or slowing it down
+// and without modifying its state. An application may include one or
+// more replicated processes performing the same work.
+//
+// The application is represented using the Debuggee concept. The Debugger
+// service provides a way to query for available Debuggees, but does not
+// provide a way to create one. A debuggee is created using the Controller
+// service, usually by running a debugger agent with the application.
+//
+// The Debugger service enables the client to set one or more Breakpoints on a
+// Debuggee and collect the results of the set Breakpoints.
+service Debugger2 {
+ // Sets a breakpoint on the debuggee.
+ rpc SetBreakpoint(SetBreakpointRequest) returns (SetBreakpointResponse) {
+ option (google.api.http) = { post: "/v2/debugger/debuggees/{debuggee_id}/breakpoints/set" body: "breakpoint" };
+ }
+
+ // Gets breakpoint information.
+ rpc GetBreakpoint(GetBreakpointRequest) returns (GetBreakpointResponse) {
+ option (google.api.http) = { get: "/v2/debugger/debuggees/{debuggee_id}/breakpoints/{breakpoint_id}" };
+ }
+
+ // Deletes the breakpoint from the debuggee.
+ rpc DeleteBreakpoint(DeleteBreakpointRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/debugger/debuggees/{debuggee_id}/breakpoints/{breakpoint_id}" };
+ }
+
+ // Lists all breakpoints for the debuggee.
+ rpc ListBreakpoints(ListBreakpointsRequest) returns (ListBreakpointsResponse) {
+ option (google.api.http) = { get: "/v2/debugger/debuggees/{debuggee_id}/breakpoints" };
+ }
+
+ // Lists all the debuggees that the user can set breakpoints on.
+ rpc ListDebuggees(ListDebuggeesRequest) returns (ListDebuggeesResponse) {
+ option (google.api.http) = { get: "/v2/debugger/debuggees" };
+ }
+}
+
+// Request to set a breakpoint.
+message SetBreakpointRequest {
+ // ID of the debuggee where the breakpoint is to be set.
+ string debuggee_id = 1;
+
+ // Breakpoint specification to set.
+ // The field 'location' of the breakpoint must be set.
+ Breakpoint breakpoint = 2;
+
+ // The client version making the call.
+ // Following: `domain/type/version` (e.g., `google.com/intellij/v1`).
+ string client_version = 4;
+}
+
+// Response for setting a breakpoint.
+message SetBreakpointResponse {
+ // Breakpoint resource.
+ // The field `id` is guaranteed to be set (in addition to the echoed fields).
+ Breakpoint breakpoint = 1;
+}
+
+// Request to get breakpoint information.
+message GetBreakpointRequest {
+ // ID of the debuggee whose breakpoint to get.
+ string debuggee_id = 1;
+
+ // ID of the breakpoint to get.
+ string breakpoint_id = 2;
+
+ // The client version making the call.
+ // Following: `domain/type/version` (e.g., `google.com/intellij/v1`).
+ string client_version = 4;
+}
+
+// Response for getting breakpoint information.
+message GetBreakpointResponse {
+ // Complete breakpoint state.
+ // The fields `id` and `location` are guaranteed to be set.
+ Breakpoint breakpoint = 1;
+}
+
+// Request to delete a breakpoint.
+message DeleteBreakpointRequest {
+ // ID of the debuggee whose breakpoint to delete.
+ string debuggee_id = 1;
+
+ // ID of the breakpoint to delete.
+ string breakpoint_id = 2;
+
+ // The client version making the call.
+ // Following: `domain/type/version` (e.g., `google.com/intellij/v1`).
+ string client_version = 3;
+}
+
+// Request to list breakpoints.
+message ListBreakpointsRequest {
+ // Wrapper message for `Breakpoint.Action`. Defines a filter on the action
+ // field of breakpoints.
+ message BreakpointActionValue {
+ // Only breakpoints with the specified action will pass the filter.
+ Breakpoint.Action value = 1;
+ }
+
+ // ID of the debuggee whose breakpoints to list.
+ string debuggee_id = 1;
+
+ // When set to `true`, the response includes the list of breakpoints set by
+ // any user. Otherwise, it includes only breakpoints set by the caller.
+ bool include_all_users = 2;
+
+ // When set to `true`, the response includes active and inactive
+ // breakpoints. Otherwise, it includes only active breakpoints.
+ bool include_inactive = 3;
+
+ // When set, the response includes only breakpoints with the specified action.
+ BreakpointActionValue action = 4;
+
+ // This field is deprecated. The following fields are always stripped out of
+ // the result: `stack_frames`, `evaluated_expressions` and `variable_table`.
+ bool strip_results = 5;
+
+ // A wait token that, if specified, blocks the call until the breakpoints
+ // list has changed, or a server selected timeout has expired. The value
+ // should be set from the last response. On wait timeout the error code
+ // `google.rpc.Code.ABORTED` (RPC) is returned; the call should then be
+ // repeated with the same `wait_token`.
+ string wait_token = 6;
+
+ // The client version making the call.
+ // Following: `domain/type/version` (e.g., `google.com/intellij/v1`).
+ string client_version = 8;
+}
+
+// Response for listing breakpoints.
+message ListBreakpointsResponse {
+ // List of breakpoints matching the request.
+ // The fields `id` and `location` are guaranteed to be set on each breakpoint.
+ // The fields `stack_frames`, `evaluated_expressions` and `variable_table`
+ // are cleared on each breakpoint regardless of its status.
+ repeated Breakpoint breakpoints = 1;
+
+ // A wait token that can be used in the next call to `list` (REST) or
+ // `ListBreakpoints` (RPC) to block until the list of breakpoints has changed.
+ string next_wait_token = 2;
+}
+
+// Request to list debuggees.
+message ListDebuggeesRequest {
+ // Project number of a Google Cloud project whose debuggees to list.
+ string project = 2;
+
+ // When set to `true`, the result includes all debuggees. Otherwise, the
+ // result includes only debuggees that are active.
+ bool include_inactive = 3;
+
+ // The client version making the call.
+ // Following: `domain/type/version` (e.g., `google.com/intellij/v1`).
+ string client_version = 4;
+}
+
+// Response for listing debuggees.
+message ListDebuggeesResponse {
+ // List of debuggees accessible to the calling user.
+ // Note that the `description` field is the only human readable field
+ // that should be displayed to the user.
+ // The fields `debuggee.id` and `description` are guaranteed to be
+ // set on each debuggee.
+ repeated Debuggee debuggees = 1;
+}
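
For the client side, a short sketch of Debugger2.SetBreakpoint as specified above: only `location` is required on the breakpoint, plus `debuggee_id` and `client_version` on the request. Stub and message class names again assume standard protoc/grpc-java codegen, and channel authentication is omitted:

    import com.google.devtools.clouddebugger.v2.Breakpoint;
    import com.google.devtools.clouddebugger.v2.Debugger2Grpc;
    import com.google.devtools.clouddebugger.v2.SetBreakpointRequest;
    import com.google.devtools.clouddebugger.v2.SourceLocation;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    final class SetBreakpointSketch {
      public static void main(String[] args) {
        ManagedChannel channel =
            ManagedChannelBuilder.forTarget("clouddebugger.googleapis.com").build();
        Debugger2Grpc.Debugger2BlockingStub debugger = Debugger2Grpc.newBlockingStub(channel);

        Breakpoint created = debugger.setBreakpoint(
                SetBreakpointRequest.newBuilder()
                    .setDebuggeeId("d-12345")                   // hypothetical debuggee id
                    .setBreakpoint(Breakpoint.newBuilder()
                        .setLocation(SourceLocation.newBuilder()
                            .setPath("src/com/example/Server.java")
                            .setLine(42)))                      // first line in a file is 1
                    .setClientVersion("example.com/sketch/v1")  // domain/type/version
                    .build())
            .getBreakpoint();
        System.out.println("breakpoint id: " + created.getId());
      }
    }
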
diff --git a/third_party/googleapis/google/devtools/clouderrorreporting/README.md b/third_party/googleapis/google/devtools/clouderrorreporting/README.md
new file mode 100644
index 0000000000..3edcebe6f2
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouderrorreporting/README.md
@@ -0,0 +1 @@
+Read more about the Stackdriver Error Reporting API [here](https://cloud.google.com/error-reporting/reference/)
diff --git a/third_party/googleapis/google/devtools/clouderrorreporting/errorreporting.yaml b/third_party/googleapis/google/devtools/clouderrorreporting/errorreporting.yaml
new file mode 100644
index 0000000000..75df65ced8
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouderrorreporting/errorreporting.yaml
@@ -0,0 +1,24 @@
+type: google.api.Service
+config_version: 3
+title: Stackdriver Error Reporting API
+name: clouderrorreporting.googleapis.com
+
+documentation:
+ summary: >-
+
+ Stackdriver Error Reporting groups and counts similar errors
+ from cloud services.
+ The Stackdriver Error Reporting API provides a way to report new errors and
+ read access to error groups and their associated errors.
+
+apis:
+- name: google.devtools.clouderrorreporting.v1beta1.ErrorGroupService
+- name: google.devtools.clouderrorreporting.v1beta1.ErrorStatsService
+- name: google.devtools.clouderrorreporting.v1beta1.ReportErrorsService
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes:
+ https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/common.proto b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/common.proto
new file mode 100644
index 0000000000..33d5cf8248
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/common.proto
@@ -0,0 +1,164 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.clouderrorreporting.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/monitored_resource.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.ErrorReporting.V1Beta1";
+option go_package = "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1;clouderrorreporting";
+option java_multiple_files = true;
+option java_outer_classname = "CommonProto";
+option java_package = "com.google.devtools.clouderrorreporting.v1beta1";
+
+
+// Description of a group of similar error events.
+message ErrorGroup {
+ // The group resource name.
+ // Example: <code>projects/my-project-123/groups/my-groupid</code>
+ string name = 1;
+
+ // Group IDs are unique for a given project. If the same kind of error
+ // occurs in different service contexts, it will receive the same group ID.
+ string group_id = 2;
+
+ // Associated tracking issues.
+ repeated TrackingIssue tracking_issues = 3;
+}
+
+// Information related to tracking the progress on resolving the error.
+message TrackingIssue {
+ // A URL pointing to a related entry in an issue tracking system.
+ // Example: https://github.com/user/project/issues/4
+ string url = 1;
+}
+
+// An error event which is returned by the Error Reporting system.
+message ErrorEvent {
+ // Time when the event occurred as provided in the error report.
+ // If the report did not contain a timestamp, the time the error was received
+ // by the Error Reporting system is used.
+ google.protobuf.Timestamp event_time = 1;
+
+ // The `ServiceContext` for which this error was reported.
+ ServiceContext service_context = 2;
+
+ // The stack trace that was reported or logged by the service.
+ string message = 3;
+
+ // Data about the context in which the error occurred.
+ ErrorContext context = 5;
+}
+
+// Describes a running service that sends errors.
+// Its version changes over time and multiple versions can run in parallel.
+message ServiceContext {
+ // An identifier of the service, such as the name of the
+ // executable, job, or Google App Engine service name. This field is expected
+ // to have a low number of values that are relatively stable over time, as
+ // opposed to `version`, which can be changed whenever new code is deployed.
+ //
+ // Contains the service name for error reports extracted from Google
+ // App Engine logs or `default` if the App Engine default service is used.
+ string service = 2;
+
+ // Represents the source code version that the developer provided,
+ // which could represent a version label or a Git SHA-1 hash, for example.
+ string version = 3;
+
+ // Type of the MonitoredResource. List of possible values:
+ // https://cloud.google.com/monitoring/api/resources
+ //
+ // Value is set automatically for incoming errors and must not be set when
+ // reporting errors.
+ string resource_type = 4;
+}
+
+// A description of the context in which an error occurred.
+// This data should be provided by the application when reporting an error,
+// unless the
+// error report has been generated automatically from Google App Engine logs.
+message ErrorContext {
+ // The HTTP request which was processed when the error was
+ // triggered.
+ HttpRequestContext http_request = 1;
+
+ // The user who caused or was affected by the crash.
+ // This can be a user ID, an email address, or an arbitrary token that
+ // uniquely identifies the user.
+ // When sending an error report, leave this field empty if the user was not
+ // logged in. In this case the
+ // Error Reporting system will use other data, such as remote IP address, to
+ // distinguish affected users. See `affected_users_count` in
+ // `ErrorGroupStats`.
+ string user = 2;
+
+ // The location in the source code where the decision was made to
+ // report the error, usually the place where it was logged.
+ // For a logged exception this would be the source line where the
+ // exception is logged, usually close to the place where it was
+ // caught. This value is in contrast to `Exception.cause_location`,
+ // which describes the source line where the exception was thrown.
+ SourceLocation report_location = 3;
+}
+
+// HTTP request data that is related to a reported error.
+// This data should be provided by the application when reporting an error,
+// unless the
+// error report has been generated automatically from Google App Engine logs.
+message HttpRequestContext {
+ // The type of HTTP request, such as `GET`, `POST`, etc.
+ string method = 1;
+
+ // The URL of the request.
+ string url = 2;
+
+ // The user agent information that is provided with the request.
+ string user_agent = 3;
+
+ // The referrer information that is provided with the request.
+ string referrer = 4;
+
+ // The HTTP response status code for the request.
+ int32 response_status_code = 5;
+
+ // The IP address from which the request originated.
+ // This can be IPv4, IPv6, or a token which is derived from the
+ // IP address, depending on the data that has been provided
+ // in the error report.
+ string remote_ip = 6;
+}
+
+// Indicates a location in the source code of the service for which
+// errors are reported.
+// This data should be provided by the application when reporting an error,
+// unless the error report has been generated automatically from Google App
+// Engine logs. All fields are optional.
+message SourceLocation {
+ // The source code filename, which can include a truncated relative
+ // path, or a full path from a production machine.
+ string file_path = 1;
+
+ // 1-based. 0 indicates that the line number is unknown.
+ int32 line_number = 2;
+
+ // Human-readable name of a function or method.
+ // The value can include optional context like the class or package name.
+ // For example, `my.package.MyClass.method` in case of Java.
+ string function_name = 4;
+}
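
The context messages above are what a client fills in when it reports an error itself (the report RPC lives in ReportErrorsService, listed in errorreporting.yaml and defined later in this import). A small sketch populating ErrorContext for an error caught in an HTTP handler, assuming standard protobuf Java codegen; the file path, line and function names are placeholders:

    import com.google.devtools.clouderrorreporting.v1beta1.ErrorContext;
    import com.google.devtools.clouderrorreporting.v1beta1.HttpRequestContext;
    import com.google.devtools.clouderrorreporting.v1beta1.SourceLocation;

    final class ErrorContextSketch {
      static ErrorContext contextFor(String method, String url, int statusCode) {
        return ErrorContext.newBuilder()
            .setHttpRequest(HttpRequestContext.newBuilder()
                .setMethod(method)
                .setUrl(url)
                .setResponseStatusCode(statusCode))
            // report_location: where the decision to report was made, i.e. where it is logged.
            .setReportLocation(SourceLocation.newBuilder()
                .setFilePath("com/example/FrontendServlet.java")
                .setLineNumber(87)
                .setFunctionName("com.example.FrontendServlet.doGet"))
            .build();
      }
    }
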
diff --git a/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_group_service.proto b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_group_service.proto
new file mode 100644
index 0000000000..e607e368f2
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_group_service.proto
@@ -0,0 +1,60 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.clouderrorreporting.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/devtools/clouderrorreporting/v1beta1/common.proto";
+
+option csharp_namespace = "Google.Cloud.ErrorReporting.V1Beta1";
+option go_package = "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1;clouderrorreporting";
+option java_multiple_files = true;
+option java_outer_classname = "ErrorGroupServiceProto";
+option java_package = "com.google.devtools.clouderrorreporting.v1beta1";
+
+
+// Service for retrieving and updating individual error groups.
+service ErrorGroupService {
+ // Get the specified group.
+ rpc GetGroup(GetGroupRequest) returns (ErrorGroup) {
+ option (google.api.http) = { get: "/v1beta1/{group_name=projects/*/groups/*}" };
+ }
+
+ // Replace the data for the specified group.
+ // Fails if the group does not exist.
+ rpc UpdateGroup(UpdateGroupRequest) returns (ErrorGroup) {
+ option (google.api.http) = { put: "/v1beta1/{group.name=projects/*/groups/*}" body: "group" };
+ }
+}
+
+// A request to return an individual group.
+message GetGroupRequest {
+ // [Required] The group resource name. Written as
+ // <code>projects/<var>projectID</var>/groups/<var>group_name</var></code>.
+ // Call
+ // <a href="/error-reporting/reference/rest/v1beta1/projects.groupStats/list">
+ // <code>groupStats.list</code></a> to return a list of groups belonging to
+ // this project.
+ //
+ // Example: <code>projects/my-project-123/groups/my-group</code>
+ string group_name = 1;
+}
+
+// A request to replace the existing data for the given group.
+message UpdateGroupRequest {
+ // [Required] The group which replaces the resource on the server.
+ ErrorGroup group = 1;
+}
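
UpdateGroup replaces the stored group outright, so attaching a tracking issue is a read-modify-write: fetch the group, append to `tracking_issues`, and send the whole message back. A sketch under the usual codegen assumptions (`ErrorGroupServiceGrpc` stub, messages in `com.google.devtools.clouderrorreporting.v1beta1`), with authentication omitted and placeholder resource names:

    import com.google.devtools.clouderrorreporting.v1beta1.ErrorGroup;
    import com.google.devtools.clouderrorreporting.v1beta1.ErrorGroupServiceGrpc;
    import com.google.devtools.clouderrorreporting.v1beta1.GetGroupRequest;
    import com.google.devtools.clouderrorreporting.v1beta1.TrackingIssue;
    import com.google.devtools.clouderrorreporting.v1beta1.UpdateGroupRequest;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    final class TrackingIssueSketch {
      public static void main(String[] args) {
        ManagedChannel channel =
            ManagedChannelBuilder.forTarget("clouderrorreporting.googleapis.com").build();
        ErrorGroupServiceGrpc.ErrorGroupServiceBlockingStub groups =
            ErrorGroupServiceGrpc.newBlockingStub(channel);

        // Read the current group, then send it back with the new issue appended.
        ErrorGroup group = groups.getGroup(GetGroupRequest.newBuilder()
            .setGroupName("projects/my-project-123/groups/my-group")
            .build());
        groups.updateGroup(UpdateGroupRequest.newBuilder()
            .setGroup(group.toBuilder()
                .addTrackingIssues(TrackingIssue.newBuilder()
                    .setUrl("https://github.com/user/project/issues/4")))
            .build());
      }
    }
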
diff --git a/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto
new file mode 100644
index 0000000000..f8a0e837b2
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto
@@ -0,0 +1,341 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.clouderrorreporting.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/devtools/clouderrorreporting/v1beta1/common.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.ErrorReporting.V1Beta1";
+option go_package = "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1;clouderrorreporting";
+option java_multiple_files = true;
+option java_outer_classname = "ErrorStatsServiceProto";
+option java_package = "com.google.devtools.clouderrorreporting.v1beta1";
+
+
+// An API for retrieving and managing error statistics as well as data for
+// individual events.
+service ErrorStatsService {
+ // Lists the specified groups.
+ rpc ListGroupStats(ListGroupStatsRequest) returns (ListGroupStatsResponse) {
+ option (google.api.http) = { get: "/v1beta1/{project_name=projects/*}/groupStats" };
+ }
+
+ // Lists the specified events.
+ rpc ListEvents(ListEventsRequest) returns (ListEventsResponse) {
+ option (google.api.http) = { get: "/v1beta1/{project_name=projects/*}/events" };
+ }
+
+ // Deletes all error events of a given project.
+ rpc DeleteEvents(DeleteEventsRequest) returns (DeleteEventsResponse) {
+ option (google.api.http) = { delete: "/v1beta1/{project_name=projects/*}/events" };
+ }
+}
+
+// Specifies a set of `ErrorGroupStats` to return.
+message ListGroupStatsRequest {
+ // [Required] The resource name of the Google Cloud Platform project. Written
+ // as <code>projects/</code> plus the
+ // <a href="https://support.google.com/cloud/answer/6158840">Google Cloud
+ // Platform project ID</a>.
+ //
+ // Example: <code>projects/my-project-123</code>.
+ string project_name = 1;
+
+ // [Optional] List all <code>ErrorGroupStats</code> with these IDs.
+ repeated string group_id = 2;
+
+ // [Optional] List only <code>ErrorGroupStats</code> which belong to a service
+ // context that matches the filter.
+ // Data for all service contexts is returned if this field is not specified.
+ ServiceContextFilter service_filter = 3;
+
+ // [Optional] List data for the given time range.
+ // If not set a default time range is used. The field time_range_begin
+ // in the response will specify the beginning of this time range.
+ // Only <code>ErrorGroupStats</code> with a non-zero count in the given time
+ // range are returned, unless the request contains an explicit group_id list.
+ // If a group_id list is given, also <code>ErrorGroupStats</code> with zero
+ // occurrences are returned.
+ QueryTimeRange time_range = 5;
+
+ // [Optional] The preferred duration for a single returned `TimedCount`.
+ // If not set, no timed counts are returned.
+ google.protobuf.Duration timed_count_duration = 6;
+
+ // [Optional] The alignment of the timed counts to be returned.
+ // Default is `ALIGNMENT_EQUAL_AT_END`.
+ TimedCountAlignment alignment = 7;
+
+ // [Optional] Time where the timed counts shall be aligned if rounded
+ // alignment is chosen. Default is 00:00 UTC.
+ google.protobuf.Timestamp alignment_time = 8;
+
+ // [Optional] The sort order in which the results are returned.
+ // Default is `COUNT_DESC`.
+ ErrorGroupOrder order = 9;
+
+ // [Optional] The maximum number of results to return per response.
+ // Default is 20.
+ int32 page_size = 11;
+
+ // [Optional] A `next_page_token` provided by a previous response. To view
+ // additional results, pass this token along with the identical query
+ // parameters as the first request.
+ string page_token = 12;
+}
+
+// Contains a set of requested error group stats.
+message ListGroupStatsResponse {
+ // The error group stats which match the given request.
+ repeated ErrorGroupStats error_group_stats = 1;
+
+ // If non-empty, more results are available.
+ // Pass this token, along with the same query parameters as the first
+ // request, to view the next page of results.
+ string next_page_token = 2;
+
+ // The timestamp specifies the start time to which the request was restricted.
+ // The start time is set based on the requested time range. It may be adjusted
+ // to a later time if a project has exceeded the storage quota and older data
+ // has been deleted.
+ google.protobuf.Timestamp time_range_begin = 4;
+}
+
+// Data extracted for a specific group based on certain filter criteria,
+// such as a given time period and/or service filter.
+message ErrorGroupStats {
+ // Group data that is independent of the filter criteria.
+ ErrorGroup group = 1;
+
+ // Approximate total number of events in the given group that match
+ // the filter criteria.
+ int64 count = 2;
+
+ // Approximate number of affected users in the given group that
+ // match the filter criteria.
+ // Users are distinguished by data in the `ErrorContext` of the
+ // individual error events, such as their login name or their remote
+ // IP address in case of HTTP requests.
+ // The number of affected users can be zero even if the number of
+ // errors is non-zero if no data was provided from which the
+ // affected user could be deduced.
+ // Users are counted based on data in the request
+ // context that was provided in the error report. If more users are
+ // implicitly affected, such as due to a crash of the whole service,
+ // this is not reflected here.
+ int64 affected_users_count = 3;
+
+ // Approximate number of occurrences over time.
+ // Timed counts returned by ListGroupStats are guaranteed to be:
+ //
+ // - Inside the requested time interval
+ // - Non-overlapping, and
+ // - Ordered by ascending time.
+ repeated TimedCount timed_counts = 4;
+
+ // Approximate first occurrence that was ever seen for this group
+ // and which matches the given filter criteria, ignoring the
+ // time_range that was specified in the request.
+ google.protobuf.Timestamp first_seen_time = 5;
+
+ // Approximate last occurrence that was ever seen for this group and
+ // which matches the given filter criteria, ignoring the time_range
+ // that was specified in the request.
+ google.protobuf.Timestamp last_seen_time = 6;
+
+ // Service contexts with a non-zero error count for the given filter
+ // criteria. This list can be truncated if multiple services are affected.
+ // Refer to `num_affected_services` for the total count.
+ repeated ServiceContext affected_services = 7;
+
+ // The total number of services with a non-zero error count for the given
+ // filter criteria.
+ int32 num_affected_services = 8;
+
+ // An arbitrary event that is chosen as representative for the whole group.
+ // The representative event is intended to be used as a quick preview for
+ // the whole group. Events in the group are usually sufficiently similar
+ // to each other such that showing an arbitrary representative provides
+ // insight into the characteristics of the group as a whole.
+ ErrorEvent representative = 9;
+}
+
+// The number of errors in a given time period.
+// All numbers are approximate since the error events are sampled
+// before counting them.
+message TimedCount {
+ // Approximate number of occurrences in the given time period.
+ int64 count = 1;
+
+ // Start of the time period to which `count` refers (included).
+ google.protobuf.Timestamp start_time = 2;
+
+ // End of the time period to which `count` refers (excluded).
+ google.protobuf.Timestamp end_time = 3;
+}
+
+// Specifies a set of error events to return.
+message ListEventsRequest {
+ // [Required] The resource name of the Google Cloud Platform project. Written
+ // as `projects/` plus the
+ // [Google Cloud Platform project
+ // ID](https://support.google.com/cloud/answer/6158840).
+ // Example: `projects/my-project-123`.
+ string project_name = 1;
+
+ // [Required] The group for which events shall be returned.
+ string group_id = 2;
+
+ // [Optional] List only ErrorGroups which belong to a service context that
+ // matches the filter.
+ // Data for all service contexts is returned if this field is not specified.
+ ServiceContextFilter service_filter = 3;
+
+ // [Optional] List only data for the given time range.
+ // If not set a default time range is used. The field time_range_begin
+ // in the response will specify the beginning of this time range.
+ QueryTimeRange time_range = 4;
+
+ // [Optional] The maximum number of results to return per response.
+ int32 page_size = 6;
+
+ // [Optional] A `next_page_token` provided by a previous response.
+ string page_token = 7;
+}
+
+// Contains a set of requested error events.
+message ListEventsResponse {
+ // The error events which match the given request.
+ repeated ErrorEvent error_events = 1;
+
+ // If non-empty, more results are available.
+ // Pass this token, along with the same query parameters as the first
+ // request, to view the next page of results.
+ string next_page_token = 2;
+
+ // The timestamp specifies the start time of the time range to which the
+ // request was restricted.
+ google.protobuf.Timestamp time_range_begin = 4;
+}
+
+// Specifies a time range for the query. Requests might be rejected or the
+// resulting timed count durations might be adjusted for lower durations.
+message QueryTimeRange {
+ // The supported time ranges.
+ enum Period {
+ // Do not use.
+ PERIOD_UNSPECIFIED = 0;
+
+ // Retrieve data for the last hour.
+ // Recommended minimum timed count duration: 1 min.
+ PERIOD_1_HOUR = 1;
+
+ // Retrieve data for the last 6 hours.
+ // Recommended minimum timed count duration: 10 min.
+ PERIOD_6_HOURS = 2;
+
+ // Retrieve data for the last day.
+ // Recommended minimum timed count duration: 1 hour.
+ PERIOD_1_DAY = 3;
+
+ // Retrieve data for the last week.
+ // Recommended minimum timed count duration: 6 hours.
+ PERIOD_1_WEEK = 4;
+
+ // Retrieve data for the last 30 days.
+ // Recommended minimum timed count duration: 1 day.
+ PERIOD_30_DAYS = 5;
+ }
+
+ // Restricts the query to the specified time range.
+ Period period = 1;
+}
+
+// Specifies criteria for filtering a subset of service contexts.
+// The fields in the filter correspond to the fields in `ServiceContext`.
+// Only exact, case-sensitive matches are supported.
+// If a field is unset or empty, it matches arbitrary values.
+message ServiceContextFilter {
+ // [Optional] The exact value to match against
+ // [`ServiceContext.service`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.service).
+ string service = 2;
+
+ // [Optional] The exact value to match against
+ // [`ServiceContext.version`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.version).
+ string version = 3;
+
+ // [Optional] The exact value to match against
+ // [`ServiceContext.resource_type`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.resource_type).
+ string resource_type = 4;
+}
+
+// Deletes all events in the project.
+message DeleteEventsRequest {
+ // [Required] The resource name of the Google Cloud Platform project. Written
+ // as `projects/` plus the
+ // [Google Cloud Platform project
+ // ID](https://support.google.com/cloud/answer/6158840).
+ // Example: `projects/my-project-123`.
+ string project_name = 1;
+}
+
+// Response message for deleting error events.
+message DeleteEventsResponse {
+
+}
+
+// Specifies how the time periods of error group counts are aligned.
+enum TimedCountAlignment {
+ // No alignment specified.
+ ERROR_COUNT_ALIGNMENT_UNSPECIFIED = 0;
+
+ // The time periods shall be consecutive, have width equal to the
+ // requested duration, and be aligned at the `alignment_time` provided in
+ // the request.
+ // The `alignment_time` does not have to be inside the query period;
+ // even if it is outside, only time periods that overlap with the
+ // query period are returned.
+ // A rounded alignment will typically result in a
+ // different size of the first or the last time period.
+ ALIGNMENT_EQUAL_ROUNDED = 1;
+
+ // The time periods shall be consecutive, have width equal to the
+ // requested duration, and be aligned at the end of the requested time
+ // period. This can result in a different size of the
+ // first time period.
+ ALIGNMENT_EQUAL_AT_END = 2;
+}
+
+// A sorting order of error groups.
+enum ErrorGroupOrder {
+ // No group order specified.
+ GROUP_ORDER_UNSPECIFIED = 0;
+
+ // Total count of errors in the given time window in descending order.
+ COUNT_DESC = 1;
+
+ // Timestamp when the group was last seen in the given time window
+ // in descending order.
+ LAST_SEEN_DESC = 2;
+
+ // Timestamp when the group was created in descending order.
+ CREATED_DESC = 3;
+
+ // Number of affected users in the given time window in descending order.
+ AFFECTED_USERS_DESC = 4;
+}
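For illustration, the following minimal Python sketch computes bucket boundaries the way the `ALIGNMENT_EQUAL_AT_END` comment above describes them: consecutive periods of the requested duration, aligned at the end of the query period, with a possibly shorter first period. The function name and the example times are hypothetical; this mirrors the comment's description, not an official implementation.

from datetime import datetime, timedelta

def equal_at_end_buckets(query_start, query_end, duration):
    # Consecutive periods of `duration`, aligned at the end of the query
    # period; the first (oldest) period may be shorter.
    buckets = []
    end = query_end
    while end > query_start:
        start = max(query_start, end - duration)
        buckets.append((start, end))
        end = start
    return list(reversed(buckets))

# A 21-hour query split into 6-hour timed counts; the oldest bucket is 3 hours.
for start, end in equal_at_end_buckets(datetime(2017, 5, 29, 3, 0),
                                       datetime(2017, 5, 30, 0, 0),
                                       timedelta(hours=6)):
    print(start, "->", end)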
diff --git a/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/errorreporting_gapic.yaml b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/errorreporting_gapic.yaml
new file mode 100644
index 0000000000..5e5b8511b0
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/errorreporting_gapic.yaml
@@ -0,0 +1,224 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.errorreporting.spi.v1beta1
+ python:
+ package_name: google.cloud.gapic.errorreporting.v1beta1
+ go:
+ package_name: cloud.google.com/go/errorreporting/apiv1beta1
+ csharp:
+ package_name: Google.Cloud.ErrorReporting.V1Beta1
+ ruby:
+ package_name: Google::Cloud::ErrorReporting::V1beta1
+ php:
+ package_name: Google\Cloud\ErrorReporting\V1beta1
+ nodejs:
+ package_name: errorreporting.v1beta1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+collections:
+- name_pattern: projects/{project}
+ entity_name: project
+- name_pattern: projects/{project}/groups/{group}
+ entity_name: group
+interfaces:
+- name: google.devtools.clouderrorreporting.v1beta1.ErrorGroupService
+ collections:
+ - name_pattern: projects/{project}/groups/{group}
+ entity_name: group
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: GetGroup
+ flattening:
+ groups:
+ - parameters:
+ - group_name
+ required_fields:
+ - group_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ group_name: group
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: UpdateGroup
+ flattening:
+ groups:
+ - parameters:
+ - group
+ required_fields:
+ - group
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ group.name: group
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+- name: google.devtools.clouderrorreporting.v1beta1.ReportErrorsService
+ smoke_test:
+ method: ReportErrorEvent
+ init_fields:
+ - project_name%project=$PROJECT_ID
+ - event.message="[MESSAGE]"
+ - event.service_context.service="[SERVICE]"
+ - event.context.report_location.file_path="path/to/file.lang"
+ - event.context.report_location.line_number=42
+ - event.context.report_location.function_name="meaningOfLife"
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: ReportErrorEvent
+ flattening:
+ groups:
+ - parameters:
+ - project_name
+ - event
+ required_fields:
+ - project_name
+ - event
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ project_name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+- name: google.devtools.clouderrorreporting.v1beta1.ErrorStatsService
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: ListGroupStats
+ flattening:
+ groups:
+ - parameters:
+ - project_name
+ - time_range
+ required_fields:
+ - project_name
+ - time_range
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: error_group_stats
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ project_name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: ListEvents
+ flattening:
+ groups:
+ - parameters:
+ - project_name
+ - group_id
+ required_fields:
+ - project_name
+ - group_id
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: error_events
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ project_name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: DeleteEvents
+ flattening:
+ groups:
+ - parameters:
+ - project_name
+ required_fields:
+ - project_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ project_name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+resource_name_generation:
+- message_name: ErrorGroup
+ field_entity_map:
+ name: group
+- message_name: GetGroupRequest
+ field_entity_map:
+ group_name: group
+- message_name: ListGroupStatsRequest
+ field_entity_map:
+ project_name: project
+- message_name: ListEventsRequest
+ field_entity_map:
+ project_name: project
+- message_name: DeleteEventsRequest
+ field_entity_map:
+ project_name: project
+- message_name: ReportErrorEventRequest
+ field_entity_map:
+ project_name: project
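As a rough illustration of how the `retry_params_def` values above combine, this sketch computes the backoff schedule they describe (initial delay 100 ms, multiplier 1.3, delays capped at 60 s, within a 600 s total budget). It reflects the usual GAPIC reading of these fields but is not generated from this config; the function name is hypothetical and RPC time itself is ignored for simplicity.

def backoff_delays(initial_ms=100, multiplier=1.3,
                   max_delay_ms=60000, total_timeout_ms=600000):
    # Exponentially growing retry delays, capped at max_delay_ms, stopping
    # once the total timeout budget would be exceeded.
    delays, elapsed, delay = [], 0, initial_ms
    while elapsed + delay <= total_timeout_ms:
        delays.append(round(delay, 1))
        elapsed += delay
        delay = min(delay * multiplier, max_delay_ms)
    return delays

print(backoff_delays()[:6])  # [100, 130.0, 169.0, 219.7, 285.6, 371.3]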
diff --git a/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto
new file mode 100644
index 0000000000..e48fd00767
--- /dev/null
+++ b/third_party/googleapis/google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto
@@ -0,0 +1,81 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.clouderrorreporting.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/devtools/clouderrorreporting/v1beta1/common.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.ErrorReporting.V1Beta1";
+option go_package = "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1;clouderrorreporting";
+option java_multiple_files = true;
+option java_outer_classname = "ReportErrorsServiceProto";
+option java_package = "com.google.devtools.clouderrorreporting.v1beta1";
+
+
+// An API for reporting error events.
+service ReportErrorsService {
+ // Report an individual error event.
+ //
+ // This endpoint accepts <strong>either</strong> an OAuth token,
+ // <strong>or</strong> an
+ // <a href="https://support.google.com/cloud/answer/6158862">API key</a>
+ // for authentication. To use an API key, append it to the URL as the value of
+ // a `key` parameter. For example:
+ // <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
+ rpc ReportErrorEvent(ReportErrorEventRequest) returns (ReportErrorEventResponse) {
+ option (google.api.http) = { post: "/v1beta1/{project_name=projects/*}/events:report" body: "event" };
+ }
+}
+
+// A request for reporting an individual error event.
+message ReportErrorEventRequest {
+ // [Required] The resource name of the Google Cloud Platform project. Written
+ // as `projects/` plus the
+ // [Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840).
+ // Example: `projects/my-project-123`.
+ string project_name = 1;
+
+ // [Required] The error event to be reported.
+ ReportedErrorEvent event = 2;
+}
+
+// Response for reporting an individual error event.
+// Data may be added to this message in the future.
+message ReportErrorEventResponse {
+
+}
+
+// An error event which is reported to the Error Reporting system.
+message ReportedErrorEvent {
+ // [Optional] Time when the event occurred.
+ // If not provided, the time when the event was received by the
+ // Error Reporting system will be used.
+ google.protobuf.Timestamp event_time = 1;
+
+ // [Required] The service context in which this error has occurred.
+ ServiceContext service_context = 2;
+
+ // [Required] A message describing the error. The message can contain an
+ // exception stack in one of the supported programming languages and formats.
+ // In that case, the message is parsed and detailed exception information
+ // is returned when retrieving the error event again.
+ string message = 3;
+
+ // [Optional] A description of the context in which the error occurred.
+ ErrorContext context = 4;
+}
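To make the API-key flow documented on `ReportErrorEvent` concrete, here is a minimal sketch using the Python `requests` library. The project ID, API key, and payload values are placeholders, and the JSON field names assume the standard proto3 JSON mapping of `ReportedErrorEvent` and `ErrorContext`.

import requests

# Placeholder project ID and API key.
url = ("https://clouderrorreporting.googleapis.com/v1beta1/"
       "projects/my-project-123/events:report")

# JSON form of a ReportedErrorEvent (camelCase per the proto3 JSON mapping).
event = {
    "serviceContext": {"service": "my-service", "version": "1.0"},
    "message": "RuntimeError: something broke",
    "context": {
        "reportLocation": {
            "filePath": "path/to/file.lang",
            "lineNumber": 42,
            "functionName": "meaningOfLife",
        }
    },
}

response = requests.post(url, params={"key": "YOUR_API_KEY"}, json=event)
response.raise_for_status()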
diff --git a/third_party/googleapis/google/devtools/cloudtrace/trace.yaml b/third_party/googleapis/google/devtools/cloudtrace/trace.yaml
new file mode 100644
index 0000000000..195d17dee2
--- /dev/null
+++ b/third_party/googleapis/google/devtools/cloudtrace/trace.yaml
@@ -0,0 +1,32 @@
+type: google.api.Service
+title: Stackdriver Trace API
+config_version: 2
+name: cloudtrace.googleapis.com
+
+documentation:
+ summary: >
+ Send and retrieve trace data from Stackdriver Trace. Data is
+ generated and available by default for all App Engine applications.
+ Data from other applications can be written to Stackdriver Trace
+ for display, reporting, and analysis.
+
+apis:
+- name: google.devtools.cloudtrace.v1.TraceService
+
+http:
+ rules:
+ - selector: google.devtools.cloudtrace.v1.TraceService.GetTrace
+ get: /v1/projects/{project_id}/traces/{trace_id}
+ - selector: google.devtools.cloudtrace.v1.TraceService.ListTraces
+ get: /v1/projects/{project_id}/traces
+ - selector: google.devtools.cloudtrace.v1.TraceService.PatchTraces
+ patch: /v1/projects/{project_id}/traces
+ body: traces
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/trace.readonly,
+ https://www.googleapis.com/auth/trace.append
diff --git a/third_party/googleapis/google/devtools/cloudtrace/v1/trace.proto b/third_party/googleapis/google/devtools/cloudtrace/v1/trace.proto
new file mode 100644
index 0000000000..7344e47d0c
--- /dev/null
+++ b/third_party/googleapis/google/devtools/cloudtrace/v1/trace.proto
@@ -0,0 +1,220 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.cloudtrace.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.Trace.V1";
+option go_package = "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1;cloudtrace";
+option java_multiple_files = true;
+option java_outer_classname = "TraceProto";
+option java_package = "com.google.devtools.cloudtrace.v1";
+
+
+// This file describes an API for collecting and viewing traces and spans
+// within a trace. A Trace is a collection of spans corresponding to a single
+// operation or set of operations for an application. A span is an individual
+// timed event which forms a node of the trace tree. Spans for a single trace
+// may span multiple services.
+service TraceService {
+ // Returns a list of traces that match the specified filter conditions.
+ rpc ListTraces(ListTracesRequest) returns (ListTracesResponse) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/traces" };
+ }
+
+ // Gets a single trace by its ID.
+ rpc GetTrace(GetTraceRequest) returns (Trace) {
+ option (google.api.http) = { get: "/v1/projects/{project_id}/traces/{trace_id}" };
+ }
+
+ // Sends new traces to Stackdriver Trace or updates existing traces. If the ID
+ // of a trace that you send matches that of an existing trace, any fields
+ // in the existing trace and its spans are overwritten by the provided values,
+ // and any new fields provided are merged with the existing trace data. If the
+ // ID does not match, a new trace is created.
+ rpc PatchTraces(PatchTracesRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { patch: "/v1/projects/{project_id}/traces" body: "traces" };
+ }
+}
+
+// A trace describes how long it takes for an application to perform an
+// operation. It consists of a set of spans, each of which represents a single
+// timed event within the operation.
+message Trace {
+ // Project ID of the Cloud project where the trace data is stored.
+ string project_id = 1;
+
+ // Globally unique identifier for the trace. This identifier is a 128-bit
+ // numeric value formatted as a 32-character hexadecimal string.
+ string trace_id = 2;
+
+ // Collection of spans in the trace.
+ repeated TraceSpan spans = 3;
+}
+
+// List of new or updated traces.
+message Traces {
+ // List of traces.
+ repeated Trace traces = 1;
+}
+
+// A span represents a single timed event within a trace. Spans can be nested
+// and form a trace tree. Often, a trace contains a root span that describes the
+// end-to-end latency of an operation and, optionally, one or more subspans for
+// its suboperations. Spans do not need to be contiguous. There may be gaps
+// between spans in a trace.
+message TraceSpan {
+ // Type of span. Can be used to specify additional relationships between spans
+ // in addition to a parent/child relationship.
+ enum SpanKind {
+ // Unspecified.
+ SPAN_KIND_UNSPECIFIED = 0;
+
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ RPC_SERVER = 1;
+
+ // Indicates that the span covers the client-side wrapper around an RPC or
+ // other remote request.
+ RPC_CLIENT = 2;
+ }
+
+ // Identifier for the span. Must be a 64-bit integer other than 0 and
+ // unique within a trace.
+ fixed64 span_id = 1;
+
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `RPC_CLIENT`
+ // and `RPC_SERVER` to identify queueing latency associated with the span.
+ SpanKind kind = 2;
+
+ // Name of the trace. The trace name is sanitized and displayed in the
+ // Stackdriver Trace tool in the Google Developers Console.
+ // The name may be a method name or some other per-call site name.
+ // For the same executable and the same call point, a best practice is
+ // to use a consistent name, which makes it easier to correlate
+ // cross-trace spans.
+ string name = 3;
+
+ // Start time of the span in nanoseconds from the UNIX epoch.
+ google.protobuf.Timestamp start_time = 4;
+
+ // End time of the span in nanoseconds from the UNIX epoch.
+ google.protobuf.Timestamp end_time = 5;
+
+ // ID of the parent span, if any. Optional.
+ fixed64 parent_span_id = 6;
+
+ // Collection of labels associated with the span.
+ map<string, string> labels = 7;
+}
+
+// The request message for the `ListTraces` method. All fields are required
+// unless noted as optional.
+message ListTracesRequest {
+ // Type of data returned for traces in the list.
+ enum ViewType {
+ // Default is `MINIMAL` if unspecified.
+ VIEW_TYPE_UNSPECIFIED = 0;
+
+ // Minimal view of the trace record that contains only the project
+ // and trace IDs.
+ MINIMAL = 1;
+
+ // Root span view of the trace record that returns the root spans along
+ // with the minimal trace data.
+ ROOTSPAN = 2;
+
+ // Complete view of the trace record that contains the actual trace data.
+ // This is equivalent to calling the REST `get` or RPC `GetTrace` method
+ // using the ID of each listed trace.
+ COMPLETE = 3;
+ }
+
+ // ID of the Cloud project where the trace data is stored.
+ string project_id = 1;
+
+ // Type of data returned for traces in the list. Optional. Default is
+ // `MINIMAL`.
+ ViewType view = 2;
+
+ // Maximum number of traces to return. If not specified or <= 0, the
+ // implementation selects a reasonable value. The implementation may
+ // return fewer traces than the requested page size. Optional.
+ int32 page_size = 3;
+
+ // Token identifying the page of results to return. If provided, use the
+ // value of the `next_page_token` field from a previous request. Optional.
+ string page_token = 4;
+
+ // Start of the time interval (inclusive) during which the trace data was
+ // collected from the application.
+ google.protobuf.Timestamp start_time = 5;
+
+ // End of the time interval (inclusive) during which the trace data was
+ // collected from the application.
+ google.protobuf.Timestamp end_time = 6;
+
+ // An optional filter for the request.
+ string filter = 7;
+
+ // Field used to sort the returned traces. Optional.
+ // Can be one of the following:
+ //
+ // * `trace_id`
+ // * `name` (`name` field of root span in the trace)
+ // * `duration` (difference between `end_time` and `start_time` fields of
+ // the root span)
+ // * `start` (`start_time` field of the root span)
+ //
+ // Descending order can be specified by appending `desc` to the sort field
+ // (for example, `name desc`).
+ //
+ // Only one sort field is permitted.
+ string order_by = 8;
+}
+
+// The response message for the `ListTraces` method.
+message ListTracesResponse {
+ // List of trace records returned.
+ repeated Trace traces = 1;
+
+ // If defined, indicates that there are more traces that match the request
+ // and that this value should be passed to the next request to continue
+ // retrieving additional traces.
+ string next_page_token = 2;
+}
+
+// The request message for the `GetTrace` method.
+message GetTraceRequest {
+ // ID of the Cloud project where the trace data is stored.
+ string project_id = 1;
+
+ // ID of the trace to return.
+ string trace_id = 2;
+}
+
+// The request message for the `PatchTraces` method.
+message PatchTracesRequest {
+ // ID of the Cloud project where the trace data is stored.
+ string project_id = 1;
+
+ // The body of the message.
+ Traces traces = 2;
+}
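A minimal sketch of paging through `ListTraces` with stubs generated from this file by protoc and grpcio-tools. The module path, the channel setup, and the project ID are assumptions following the generator's usual conventions; real calls also need OAuth call credentials, which are omitted here.

import grpc

# Assumed generated modules (protoc + grpcio-tools naming conventions).
from google.devtools.cloudtrace.v1 import trace_pb2, trace_pb2_grpc

channel = grpc.secure_channel("cloudtrace.googleapis.com:443",
                              grpc.ssl_channel_credentials())
stub = trace_pb2_grpc.TraceServiceStub(channel)

page_token = ""
while True:
    response = stub.ListTraces(trace_pb2.ListTracesRequest(
        project_id="my-project-123",   # placeholder project ID
        view=trace_pb2.ListTracesRequest.ROOTSPAN,
        page_size=100,
        page_token=page_token))
    for trace in response.traces:
        print(trace.trace_id, len(trace.spans))
    page_token = response.next_page_token
    if not page_token:
        break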
diff --git a/third_party/googleapis/google/devtools/cloudtrace/v1/trace_gapic.yaml b/third_party/googleapis/google/devtools/cloudtrace/v1/trace_gapic.yaml
new file mode 100644
index 0000000000..f04e5a75b0
--- /dev/null
+++ b/third_party/googleapis/google/devtools/cloudtrace/v1/trace_gapic.yaml
@@ -0,0 +1,88 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.trace.spi.v1
+ python:
+ package_name: google.cloud.gapic.trace.v1
+ ruby:
+ package_name: Google::Cloud::Trace::V1
+ php:
+ package_name: Google\Cloud\Trace\V1
+ nodejs:
+ package_name: trace.v1
+ domain_layer_location: google-cloud
+ go:
+ package_name: cloud.google.com/go/trace/apiv1
+ domain_layer_location: cloud.google.com/go/trace
+ csharp:
+ package_name: Google.Cloud.Trace.V1
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.devtools.cloudtrace.v1.TraceService
+ smoke_test:
+ method: ListTraces
+ init_fields:
+ - project_id=$PROJECT_ID
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.2
+ max_retry_delay_millis: 1000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1.5
+ max_rpc_timeout_millis: 30000
+ total_timeout_millis: 45000
+ methods:
+ - name: PatchTraces
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ - traces
+ required_fields:
+ - project_id
+ - traces
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ request_object_method: true
+ - name: GetTrace
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ - trace_id
+ required_fields:
+ - project_id
+ - trace_id
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ request_object_method: false
+ - name: ListTraces
+ page_streaming:
+ request:
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: traces
+ flattening:
+ groups:
+ - parameters:
+ - project_id
+ required_fields:
+ - project_id
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ request_object_method: true
diff --git a/third_party/googleapis/google/devtools/source/v1/source_context.proto b/third_party/googleapis/google/devtools/source/v1/source_context.proto
new file mode 100644
index 0000000000..cacfebdd2b
--- /dev/null
+++ b/third_party/googleapis/google/devtools/source/v1/source_context.proto
@@ -0,0 +1,181 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.source.v1;
+
+import "google/api/annotations.proto";
+
+option csharp_namespace = "Google.Cloud.DevTools.Source.V1";
+option go_package = "google.golang.org/genproto/googleapis/devtools/source/v1;source";
+option java_multiple_files = true;
+option java_outer_classname = "SourceContextProto";
+option java_package = "com.google.devtools.source.v1";
+
+
+// A SourceContext is a reference to a tree of files. A SourceContext together
+// with a path point to a unique revision of a single file or directory.
+message SourceContext {
+ // A SourceContext can refer to any one of the following types of repositories.
+ oneof context {
+ // A SourceContext referring to a revision in a cloud repo.
+ CloudRepoSourceContext cloud_repo = 1;
+
+ // A SourceContext referring to a snapshot in a cloud workspace.
+ CloudWorkspaceSourceContext cloud_workspace = 2;
+
+ // A SourceContext referring to a Gerrit project.
+ GerritSourceContext gerrit = 3;
+
+ // A SourceContext referring to any third party Git repo (e.g. GitHub).
+ GitSourceContext git = 6;
+ }
+}
+
+// An ExtendedSourceContext is a SourceContext combined with additional
+// details describing the context.
+message ExtendedSourceContext {
+ // Any source context.
+ SourceContext context = 1;
+
+ // Labels with user defined metadata.
+ map<string, string> labels = 2;
+}
+
+// An alias to a repo revision.
+message AliasContext {
+ // The type of an Alias.
+ enum Kind {
+ // Do not use.
+ ANY = 0;
+
+ // Git tag
+ FIXED = 1;
+
+ // Git branch
+ MOVABLE = 2;
+
+ // OTHER is used to specify non-standard aliases, those not of the kinds
+ // above. For example, if a Git repo has a ref named "refs/foo/bar", it
+ // is considered to be of kind OTHER.
+ OTHER = 4;
+ }
+
+ // The alias kind.
+ Kind kind = 1;
+
+ // The alias name.
+ string name = 2;
+}
+
+// A CloudRepoSourceContext denotes a particular revision in a cloud
+// repo (a repo hosted by the Google Cloud Platform).
+message CloudRepoSourceContext {
+ // The ID of the repo.
+ RepoId repo_id = 1;
+
+ // A revision in a cloud repository can be identified by either its revision
+ // ID or its Alias.
+ oneof revision {
+ // A revision ID.
+ string revision_id = 2;
+
+ // The name of an alias (branch, tag, etc.).
+ string alias_name = 3;
+
+ // An alias, which may be a branch or tag.
+ AliasContext alias_context = 4;
+ }
+}
+
+// A CloudWorkspaceSourceContext denotes a workspace at a particular snapshot.
+message CloudWorkspaceSourceContext {
+ // The ID of the workspace.
+ CloudWorkspaceId workspace_id = 1;
+
+ // The ID of the snapshot.
+ // An empty snapshot_id refers to the most recent snapshot.
+ string snapshot_id = 2;
+}
+
+// A SourceContext referring to a Gerrit project.
+message GerritSourceContext {
+ // The URI of a running Gerrit instance.
+ string host_uri = 1;
+
+ // The full project name within the host. Projects may be nested, so
+ // "project/subproject" is a valid project name.
+ // The "repo name" is hostURI/project.
+ string gerrit_project = 2;
+
+ // A revision in a Gerrit project can be identified by either its revision ID
+ // or its alias.
+ oneof revision {
+ // A revision (commit) ID.
+ string revision_id = 3;
+
+ // The name of an alias (branch, tag, etc.).
+ string alias_name = 4;
+
+ // An alias, which may be a branch or tag.
+ AliasContext alias_context = 5;
+ }
+}
+
+// A GitSourceContext denotes a particular revision in a third party Git
+// repository (e.g. GitHub).
+message GitSourceContext {
+ // Git repository URL.
+ string url = 1;
+
+ // Git commit hash.
+ // Required.
+ string revision_id = 2;
+}
+
+// A unique identifier for a cloud repo.
+message RepoId {
+ // A cloud repository can be identified by either its project ID and
+ // repository name combination, or its globally unique identifier.
+ oneof id {
+ // A combination of a project ID and a repo name.
+ ProjectRepoId project_repo_id = 1;
+
+ // A server-assigned, globally unique identifier.
+ string uid = 2;
+ }
+}
+
+// Selects a repo using a Google Cloud Platform project ID
+// (e.g. winged-cargo-31) and a repo name within that project.
+message ProjectRepoId {
+ // The ID of the project.
+ string project_id = 1;
+
+ // The name of the repo. Leave empty for the default repo.
+ string repo_name = 2;
+}
+
+// A CloudWorkspaceId is a unique identifier for a cloud workspace.
+// A cloud workspace is a place associated with a repo where modified files
+// can be stored before they are committed.
+message CloudWorkspaceId {
+ // The ID of the repo containing the workspace.
+ RepoId repo_id = 1;
+
+ // The unique name of the workspace within the repo. This is the name
+ // chosen by the client in the Source API's CreateWorkspace method.
+ string name = 2;
+}
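A short sketch showing how the `context` oneof of `SourceContext` behaves when the messages above are used from Python. The import path follows protoc's usual conventions and, like all IDs and URLs below, is an assumption.

from google.devtools.source.v1 import source_context_pb2 as sc

ctx = sc.SourceContext()
ctx.cloud_repo.repo_id.project_repo_id.project_id = "winged-cargo-31"
ctx.cloud_repo.repo_id.project_repo_id.repo_name = "default"
ctx.cloud_repo.revision_id = "0123abcd"           # hypothetical revision ID
print(ctx.WhichOneof("context"))                  # -> "cloud_repo"

# Assigning a different member of the oneof clears `cloud_repo`.
ctx.git.url = "https://github.com/example/repo.git"
ctx.git.revision_id = "0123abcd"
print(ctx.WhichOneof("context"))                  # -> "git"

labeled = sc.ExtendedSourceContext(context=ctx,
                                   labels={"environment": "test"})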
diff --git a/third_party/googleapis/google/devtools/sourcerepo/v1/sourcerepo.proto b/third_party/googleapis/google/devtools/sourcerepo/v1/sourcerepo.proto
new file mode 100644
index 0000000000..a50a30b1d4
--- /dev/null
+++ b/third_party/googleapis/google/devtools/sourcerepo/v1/sourcerepo.proto
@@ -0,0 +1,147 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.sourcerepo.v1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/devtools/sourcerepo/v1;sourcerepo";
+option java_multiple_files = true;
+option java_outer_classname = "SourceRepoProto";
+option java_package = "com.google.devtools.sourcerepo.v1";
+
+
+// The Source Repo API service.
+service SourceRepo {
+ // Returns all repos belonging to a project.
+ rpc ListRepos(ListReposRequest) returns (ListReposResponse) {
+ option (google.api.http) = { get: "/v1/{name=projects/*}/repos" };
+ }
+
+ // Returns information about a repo.
+ rpc GetRepo(GetRepoRequest) returns (Repo) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/repos/**}" };
+ }
+
+ // Creates a repo in the given project with the given name.
+ //
+ // If the named repository already exists, `CreateRepo` returns
+ // `ALREADY_EXISTS`.
+ rpc CreateRepo(CreateRepoRequest) returns (Repo) {
+ option (google.api.http) = { post: "/v1/{parent=projects/*}/repos" body: "repo" };
+ }
+
+ // Deletes a repo.
+ rpc DeleteRepo(DeleteRepoRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/repos/**}" };
+ }
+
+ // Sets the access control policy on the specified resource. Replaces any
+ // existing policy.
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/repos/**}:setIamPolicy" body: "*" };
+ }
+
+ // Gets the access control policy for a resource.
+ // Returns an empty policy if the resource exists and does not have a policy
+ // set.
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { get: "/v1/{resource=projects/*/repos/**}:getIamPolicy" };
+ }
+
+ // Returns permissions that a caller has on the specified resource.
+ // If the resource does not exist, this will return an empty set of
+ // permissions, not a NOT_FOUND error.
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/repos/**}:testIamPermissions" body: "*" };
+ }
+}
+
+// A repository (or repo) is a Git repository storing versioned source content.
+message Repo {
+ // Resource name of the repository, of the form
+ // `projects/<project>/repos/<repo>`.
+ string name = 1;
+
+ // The size in bytes of the repo.
+ int64 size = 2;
+
+ // URL to clone the repository from Google Cloud Source Repositories.
+ string url = 3;
+
+ // How this repository mirrors a repository managed by another service.
+ MirrorConfig mirror_config = 4;
+}
+
+// Configuration to automatically mirror a repository from another
+// hosting service, for example GitHub or BitBucket.
+message MirrorConfig {
+ // URL of the main repository at the other hosting service.
+ string url = 1;
+
+ // ID of the webhook listening to updates to trigger mirroring.
+ // Removing this webhook from the other hosting service will stop
+ // Google Cloud Source Repositories from receiving notifications,
+ // thereby disabling mirroring.
+ string webhook_id = 2;
+
+ // ID of the SSH deploy key at the other hosting service.
+ // Removing this key from the other service would deauthorize
+ // Google Cloud Source Repositories from mirroring.
+ string deploy_key_id = 3;
+}
+
+// Request for GetRepo.
+message GetRepoRequest {
+ // The name of the requested repository. Values are of the form
+ // `projects/<project>/repos/<repo>`.
+ string name = 1;
+}
+
+// Request for ListRepos.
+message ListReposRequest {
+ // The project ID whose repos should be listed. Values are of the form
+ // `projects/<project>`.
+ string name = 1;
+}
+
+// Response for ListRepos.
+message ListReposResponse {
+ // The listed repos.
+ repeated Repo repos = 1;
+}
+
+// Request for CreateRepo.
+message CreateRepoRequest {
+ // The project in which to create the repo. Values are of the form
+ // `projects/<project>`.
+ string parent = 1;
+
+ // The repo to create. Only name needs to be set; all other fields
+ // are currently ignored.
+ Repo repo = 2;
+}
+
+// Request for DeleteRepo.
+message DeleteRepoRequest {
+ // The name of the repo to delete. Values are of the form
+ // `projects/<project>/repos/<repo>`.
+ string name = 1;
+}
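A minimal sketch of creating a repo and handling the `ALREADY_EXISTS` case described on `CreateRepo`, using stubs generated from this file. The module path and host follow the usual conventions and are assumptions; OAuth call credentials are required in practice and omitted here.

import grpc

# Assumed generated modules (protoc + grpcio-tools naming conventions).
from google.devtools.sourcerepo.v1 import sourcerepo_pb2, sourcerepo_pb2_grpc

channel = grpc.secure_channel("sourcerepo.googleapis.com:443",
                              grpc.ssl_channel_credentials())
stub = sourcerepo_pb2_grpc.SourceRepoStub(channel)

repo_name = "projects/my-project-123/repos/my-repo"   # placeholder name
try:
    repo = stub.CreateRepo(sourcerepo_pb2.CreateRepoRequest(
        parent="projects/my-project-123",
        repo=sourcerepo_pb2.Repo(name=repo_name)))
except grpc.RpcError as err:
    # CreateRepo returns ALREADY_EXISTS if the named repository exists.
    if err.code() == grpc.StatusCode.ALREADY_EXISTS:
        repo = stub.GetRepo(sourcerepo_pb2.GetRepoRequest(name=repo_name))
    else:
        raise
print(repo.name, repo.url)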
diff --git a/third_party/googleapis/google/example/library/README.md b/third_party/googleapis/google/example/library/README.md
new file mode 100644
index 0000000000..ee3aa684d5
--- /dev/null
+++ b/third_party/googleapis/google/example/library/README.md
@@ -0,0 +1,4 @@
+# Introduction
+This is a Google example service representing a simple digital library.
+It manages a collection of shelf resources, and each shelf owns a collection
+of book resources.
diff --git a/third_party/googleapis/google/example/library/v1/library.proto b/third_party/googleapis/google/example/library/v1/library.proto
new file mode 100644
index 0000000000..c31f77201e
--- /dev/null
+++ b/third_party/googleapis/google/example/library/v1/library.proto
@@ -0,0 +1,254 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.example.library.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/example/library/v1;library";
+option java_multiple_files = true;
+option java_outer_classname = "LibraryProto";
+option java_package = "com.google.example.library.v1";
+
+
+// This API represents a simple digital library. It lets you manage Shelf
+// resources and Book resources in the library. It defines the following
+// resource model:
+//
+// - The API has a collection of [Shelf][google.example.library.v1.Shelf]
+// resources, named `shelves/*`
+//
+// - Each Shelf has a collection of [Book][google.example.library.v1.Book]
+// resources, named `shelves/*/books/*`
+service LibraryService {
+ // Creates a shelf, and returns the new Shelf.
+ rpc CreateShelf(CreateShelfRequest) returns (Shelf) {
+ option (google.api.http) = { post: "/v1/shelves" body: "shelf" };
+ }
+
+ // Gets a shelf. Returns NOT_FOUND if the shelf does not exist.
+ rpc GetShelf(GetShelfRequest) returns (Shelf) {
+ option (google.api.http) = { get: "/v1/{name=shelves/*}" };
+ }
+
+ // Lists shelves. The order is unspecified but deterministic. Newly created
+ // shelves will not necessarily be added to the end of this list.
+ rpc ListShelves(ListShelvesRequest) returns (ListShelvesResponse) {
+ option (google.api.http) = { get: "/v1/shelves" };
+ }
+
+ // Deletes a shelf. Returns NOT_FOUND if the shelf does not exist.
+ rpc DeleteShelf(DeleteShelfRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=shelves/*}" };
+ }
+
+ // Merges two shelves by adding all books from the shelf named
+ // `other_shelf_name` to shelf `name`, and deletes
+ // `other_shelf_name`. Returns the updated shelf.
+ // The book ids of the moved books may not be the same as those of the
+ // original books.
+ //
+ // Returns NOT_FOUND if either shelf does not exist.
+ // This call is a no-op if the specified shelves are the same.
+ rpc MergeShelves(MergeShelvesRequest) returns (Shelf) {
+ option (google.api.http) = { post: "/v1/{name=shelves/*}:merge" body: "*" };
+ }
+
+ // Creates a book, and returns the new Book.
+ rpc CreateBook(CreateBookRequest) returns (Book) {
+ option (google.api.http) = { post: "/v1/{name=shelves/*}/books" body: "book" };
+ }
+
+ // Gets a book. Returns NOT_FOUND if the book does not exist.
+ rpc GetBook(GetBookRequest) returns (Book) {
+ option (google.api.http) = { get: "/v1/{name=shelves/*/books/*}" };
+ }
+
+ // Lists books in a shelf. The order is unspecified but deterministic. Newly
+ // created books will not necessarily be added to the end of this list.
+ // Returns NOT_FOUND if the shelf does not exist.
+ rpc ListBooks(ListBooksRequest) returns (ListBooksResponse) {
+ option (google.api.http) = { get: "/v1/{name=shelves/*}/books" };
+ }
+
+ // Deletes a book. Returns NOT_FOUND if the book does not exist.
+ rpc DeleteBook(DeleteBookRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=shelves/*/books/*}" };
+ }
+
+ // Updates a book. Returns INVALID_ARGUMENT if the name of the book
+ // is non-empty and does not equal the previous name.
+ rpc UpdateBook(UpdateBookRequest) returns (Book) {
+ option (google.api.http) = { put: "/v1/{name=shelves/*/books/*}" body: "book" };
+ }
+
+ // Moves a book to another shelf, and returns the new book. The book
+ // id of the new book may not be the same as that of the original book.
+ rpc MoveBook(MoveBookRequest) returns (Book) {
+ option (google.api.http) = { post: "/v1/{name=shelves/*/books/*}:move" body: "*" };
+ }
+}
+
+// A single book in the library.
+message Book {
+ // The resource name of the book.
+ // Book names have the form `shelves/{shelf_id}/books/{book_id}`.
+ // The name is ignored when creating a book.
+ string name = 1;
+
+ // The name of the book author.
+ string author = 2;
+
+ // The title of the book.
+ string title = 3;
+
+ // Value indicating whether the book has been read.
+ bool read = 4;
+}
+
+// A Shelf contains a collection of books with a theme.
+message Shelf {
+ // The resource name of the shelf.
+ // Shelf names have the form `shelves/{shelf_id}`.
+ // The name is ignored when creating a shelf.
+ string name = 1;
+
+ // The theme of the shelf.
+ string theme = 2;
+}
+
+// Request message for LibraryService.CreateShelf.
+message CreateShelfRequest {
+ // The shelf to create.
+ Shelf shelf = 1;
+}
+
+// Request message for LibraryService.GetShelf.
+message GetShelfRequest {
+ // The name of the shelf to retrieve.
+ string name = 1;
+}
+
+// Request message for LibraryService.ListShelves.
+message ListShelvesRequest {
+ // Requested page size. Server may return fewer shelves than requested.
+ // If unspecified, server will pick an appropriate default.
+ int32 page_size = 1;
+
+ // A token identifying a page of results the server should return.
+ // Typically, this is the value of
+ // [ListShelvesResponse.next_page_token][google.example.library.v1.ListShelvesResponse.next_page_token]
+ // returned from the previous call to the `ListShelves` method.
+ string page_token = 2;
+}
+
+// Response message for LibraryService.ListShelves.
+message ListShelvesResponse {
+ // The list of shelves.
+ repeated Shelf shelves = 1;
+
+ // A token to retrieve the next page of results.
+ // Pass this value in the
+ // [ListShelvesRequest.page_token][google.example.library.v1.ListShelvesRequest.page_token]
+ // field in the subsequent call to the `ListShelves` method to retrieve the
+ // next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for LibraryService.DeleteShelf.
+message DeleteShelfRequest {
+ // The name of the shelf to delete.
+ string name = 1;
+}
+
+// Describes the shelf being removed (other_shelf_name) and updated
+// (name) in this merge.
+message MergeShelvesRequest {
+ // The name of the shelf we're adding books to.
+ string name = 1;
+
+ // The name of the shelf we're removing books from and deleting.
+ string other_shelf_name = 2;
+}
+
+// Request message for LibraryService.CreateBook.
+message CreateBookRequest {
+ // The name of the shelf in which the book is created.
+ string name = 1;
+
+ // The book to create.
+ Book book = 2;
+}
+
+// Request message for LibraryService.GetBook.
+message GetBookRequest {
+ // The name of the book to retrieve.
+ string name = 1;
+}
+
+// Request message for LibraryService.ListBooks.
+message ListBooksRequest {
+ // The name of the shelf whose books we'd like to list.
+ string name = 1;
+
+ // Requested page size. Server may return fewer books than requested.
+ // If unspecified, server will pick an appropriate default.
+ int32 page_size = 2;
+
+ // A token identifying a page of results the server should return.
+ // Typically, this is the value of
+ // [ListBooksResponse.next_page_token][google.example.library.v1.ListBooksResponse.next_page_token]
+ // returned from the previous call to the `ListBooks` method.
+ string page_token = 3;
+}
+
+// Response message for LibraryService.ListBooks.
+message ListBooksResponse {
+ // The list of books.
+ repeated Book books = 1;
+
+ // A token to retrieve the next page of results.
+ // Pass this value in the
+ // [ListBooksRequest.page_token][google.example.library.v1.ListBooksRequest.page_token]
+ // field in the subsequent call to the `ListBooks` method to retrieve the
+ // next page of results.
+ string next_page_token = 2;
+}
+
+// Request message for LibraryService.UpdateBook.
+message UpdateBookRequest {
+ // The name of the book to update.
+ string name = 1;
+
+ // The book to update with. The name must match or be empty.
+ Book book = 2;
+}
+
+// Request message for LibraryService.DeleteBook.
+message DeleteBookRequest {
+ // The name of the book to delete.
+ string name = 1;
+}
+
+// Describes what book to move (name) and what shelf we're moving it
+// to (other_shelf_name).
+message MoveBookRequest {
+ // The name of the book to move.
+ string name = 1;
+
+ // The name of the destination shelf.
+ string other_shelf_name = 2;
+}
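A minimal sketch of the resource model above using stubs generated from this file: create a shelf, add a book to it, then page through the shelf's books. The module names and the server address are assumptions; this example service is not a hosted API, so the sketch presumes a server implementing `LibraryService` at the given address.

import grpc

# Assumed generated modules (protoc + grpcio-tools naming conventions).
from google.example.library.v1 import library_pb2, library_pb2_grpc

channel = grpc.insecure_channel("localhost:50051")   # hypothetical server
stub = library_pb2_grpc.LibraryServiceStub(channel)

shelf = stub.CreateShelf(library_pb2.CreateShelfRequest(
    shelf=library_pb2.Shelf(theme="science fiction")))

book = stub.CreateBook(library_pb2.CreateBookRequest(
    name=shelf.name,
    book=library_pb2.Book(author="A. Author", title="A Title")))

# Page through the shelf's books using page_token / next_page_token.
page_token = ""
while True:
    response = stub.ListBooks(library_pb2.ListBooksRequest(
        name=shelf.name, page_size=50, page_token=page_token))
    for b in response.books:
        print(b.name, b.title, b.read)
    page_token = response.next_page_token
    if not page_token:
        break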
diff --git a/third_party/googleapis/google/genomics/README.md b/third_party/googleapis/google/genomics/README.md
new file mode 100644
index 0000000000..8bc621804a
--- /dev/null
+++ b/third_party/googleapis/google/genomics/README.md
@@ -0,0 +1,14 @@
+Stores, processes, explores and shares genomic data. This API implements
+the Global Alliance for Genomics and Health (GA4GH) v0.5.1 API as well as
+several extensions.
+
+The Google Genomics API supports access via both
+[JSON/REST](https://cloud.google.com/genomics/reference/rest) and
+[gRPC](https://cloud.google.com/genomics/reference/rpc). JSON/REST is more
+broadly available and is easier for getting started with Google Genomics; it
+works well for small metadata resources (datasets, variant sets, read group
+sets) and for browsing small genomic regions for datasets of any size. For
+performant bulk data access (reads and variants), use gRPC.
+
+See also an [overview of genomic resources](https://cloud.google.com/genomics/v1/users-guide)
+and an overview of [Genomics on Google Cloud](https://cloud.google.com/genomics/overview). \ No newline at end of file
diff --git a/third_party/googleapis/google/genomics/v1/annotations.proto b/third_party/googleapis/google/genomics/v1/annotations.proto
new file mode 100644
index 0000000000..0a1e999eb2
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/annotations.proto
@@ -0,0 +1,672 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/wrappers.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.genomics.v1";
+
+
+// This service provides storage and positional retrieval of genomic
+// reference annotations, including variant annotations.
+service AnnotationServiceV1 {
+ // Creates a new annotation set. Caller must have WRITE permission for the
+ // associated dataset.
+ //
+ // The following fields are required:
+ //
+ // * [datasetId][google.genomics.v1.AnnotationSet.dataset_id]
+ // * [referenceSetId][google.genomics.v1.AnnotationSet.reference_set_id]
+ //
+ // All other fields may be optionally specified, unless documented as being
+ // server-generated (for example, the `id` field).
+ rpc CreateAnnotationSet(CreateAnnotationSetRequest) returns (AnnotationSet) {
+ option (google.api.http) = { post: "/v1/annotationsets" body: "annotation_set" };
+ }
+
+ // Gets an annotation set. Caller must have READ permission for
+ // the associated dataset.
+ rpc GetAnnotationSet(GetAnnotationSetRequest) returns (AnnotationSet) {
+ option (google.api.http) = { get: "/v1/annotationsets/{annotation_set_id}" };
+ }
+
+ // Updates an annotation set. The update must respect all mutability
+ // restrictions and other invariants described on the annotation set resource.
+ // Caller must have WRITE permission for the associated dataset.
+ rpc UpdateAnnotationSet(UpdateAnnotationSetRequest) returns (AnnotationSet) {
+ option (google.api.http) = { put: "/v1/annotationsets/{annotation_set_id}" body: "annotation_set" };
+ }
+
+ // Deletes an annotation set. Caller must have WRITE permission
+ // for the associated annotation set.
+ rpc DeleteAnnotationSet(DeleteAnnotationSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/annotationsets/{annotation_set_id}" };
+ }
+
+ // Searches for annotation sets that match the given criteria. Annotation sets
+ // are returned in an unspecified order. This order is consistent, such that
+ // two queries for the same content (regardless of page size) yield annotation
+ // sets in the same order across their respective streams of paginated
+ // responses. Caller must have READ permission for the queried datasets.
+ rpc SearchAnnotationSets(SearchAnnotationSetsRequest) returns (SearchAnnotationSetsResponse) {
+ option (google.api.http) = { post: "/v1/annotationsets/search" body: "*" };
+ }
+
+ // Creates a new annotation. Caller must have WRITE permission
+ // for the associated annotation set.
+ //
+ // The following fields are required:
+ //
+ // * [annotationSetId][google.genomics.v1.Annotation.annotation_set_id]
+ // * [referenceName][google.genomics.v1.Annotation.reference_name] or
+ // [referenceId][google.genomics.v1.Annotation.reference_id]
+ //
+ // ### Transcripts
+ //
+ // For annotations of type TRANSCRIPT, the following fields of
+ // [transcript][google.genomics.v1.Annotation.transcript] must be provided:
+ //
+ // * [exons.start][google.genomics.v1.Transcript.Exon.start]
+ // * [exons.end][google.genomics.v1.Transcript.Exon.end]
+ //
+ // All other fields may be optionally specified, unless documented as being
+ // server-generated (for example, the `id` field). The annotated
+ // range must be no longer than 100Mbp (mega base pairs). See the
+ // [Annotation resource][google.genomics.v1.Annotation]
+ // for additional restrictions on each field.
+ rpc CreateAnnotation(CreateAnnotationRequest) returns (Annotation) {
+ option (google.api.http) = { post: "/v1/annotations" body: "annotation" };
+ }
+
+ // Creates one or more new annotations atomically. All annotations must
+ // belong to the same annotation set. Caller must have WRITE
+ // permission for this annotation set. For optimal performance, batch
+ // positionally adjacent annotations together.
+ //
+ // If the request has a systemic issue, such as an attempt to write to
+ // an inaccessible annotation set, the entire RPC will fail accordingly. For
+ // lesser data issues, when possible, an error will be isolated to the
+ // corresponding batch entry in the response; the remaining well-formed
+ // annotations will be created normally.
+ //
+ // For details on the requirements for each individual annotation resource,
+ // see
+ // [CreateAnnotation][google.genomics.v1.AnnotationServiceV1.CreateAnnotation].
+ rpc BatchCreateAnnotations(BatchCreateAnnotationsRequest) returns (BatchCreateAnnotationsResponse) {
+ option (google.api.http) = { post: "/v1/annotations:batchCreate" body: "*" };
+ }
+
+ // Gets an annotation. Caller must have READ permission
+ // for the associated annotation set.
+ rpc GetAnnotation(GetAnnotationRequest) returns (Annotation) {
+ option (google.api.http) = { get: "/v1/annotations/{annotation_id}" };
+ }
+
+ // Updates an annotation. Caller must have
+ // WRITE permission for the associated dataset.
+ rpc UpdateAnnotation(UpdateAnnotationRequest) returns (Annotation) {
+ option (google.api.http) = { put: "/v1/annotations/{annotation_id}" body: "annotation" };
+ }
+
+ // Deletes an annotation. Caller must have WRITE permission for
+ // the associated annotation set.
+ rpc DeleteAnnotation(DeleteAnnotationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/annotations/{annotation_id}" };
+ }
+
+ // Searches for annotations that match the given criteria. Results are
+ // ordered by genomic coordinate (by reference sequence, then position).
+ // Annotations with equivalent genomic coordinates are returned in an
+ // unspecified order. This order is consistent, such that two queries for the
+ // same content (regardless of page size) yield annotations in the same order
+ // across their respective streams of paginated responses. Caller must have
+ // READ permission for the queried annotation sets.
+ rpc SearchAnnotations(SearchAnnotationsRequest) returns (SearchAnnotationsResponse) {
+ option (google.api.http) = { post: "/v1/annotations/search" body: "*" };
+ }
+}
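A minimal sketch of `CreateAnnotation` with the required fields named in the comment above, using stubs generated from this file. The module path, the `VARIANT` enum access, and all IDs and coordinates are assumptions; OAuth call credentials are required in practice and omitted here.

import grpc

# Assumed generated modules (protoc + grpcio-tools naming conventions).
from google.genomics.v1 import annotations_pb2, annotations_pb2_grpc

channel = grpc.secure_channel("genomics.googleapis.com:443",
                              grpc.ssl_channel_credentials())
stub = annotations_pb2_grpc.AnnotationServiceV1Stub(channel)

# Placeholder annotation set ID and coordinates; only the fields called out
# in the comments above are set here.
annotation = annotations_pb2.Annotation(
    annotation_set_id="example-annotation-set-id",
    reference_name="chr1",
    start=10176,
    end=10177,
    type=annotations_pb2.VARIANT)

created = stub.CreateAnnotation(
    annotations_pb2.CreateAnnotationRequest(annotation=annotation))
print(created.id)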
+
+// An annotation set is a logical grouping of annotations that share consistent
+// type information and provenance. Examples of annotation sets include 'all
+// genes from refseq', and 'all variant annotations from ClinVar'.
+message AnnotationSet {
+ // The server-generated annotation set ID, unique across all annotation sets.
+ string id = 1;
+
+ // The dataset to which this annotation set belongs.
+ string dataset_id = 2;
+
+ // The ID of the reference set that defines the coordinate space for this
+ // set's annotations.
+ string reference_set_id = 3;
+
+ // The display name for this annotation set.
+ string name = 4;
+
+ // The source URI describing the file from which this annotation set was
+ // generated, if any.
+ string source_uri = 5;
+
+ // The type of annotations contained within this set.
+ AnnotationType type = 6;
+
+ // A map of additional annotation set information. This must be of the form
+ // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 17;
+}
+
+// An annotation describes a region of reference genome. The value of an
+// annotation may be one of several canonical types, supplemented by arbitrary
+// info tags. An annotation is not inherently associated with a specific
+// sample or individual (though a client could choose to use annotations in
+// this way). Example canonical annotation types are `GENE` and
+// `VARIANT`.
+message Annotation {
+ // The server-generated annotation ID, unique across all annotations.
+ string id = 1;
+
+ // The annotation set to which this annotation belongs.
+ string annotation_set_id = 2;
+
+ // The display name of this annotation.
+ string name = 3;
+
+ // The ID of the Google Genomics reference associated with this range.
+ string reference_id = 4;
+
+ // The display name corresponding to the reference specified by
+ // `referenceId`, for example `chr1`, `1`, or `chrX`.
+ string reference_name = 5;
+
+ // The start position of the range on the reference, 0-based inclusive.
+ int64 start = 6;
+
+ // The end position of the range on the reference, 0-based exclusive.
+ int64 end = 7;
+
+ // Whether this range refers to the reverse strand, as opposed to the forward
+ // strand. Note that regardless of this field, the start/end position of the
+ // range always refer to the forward strand.
+ bool reverse_strand = 8;
+
+ // The data type for this annotation. Must match the containing annotation
+ // set's type.
+ AnnotationType type = 9;
+
+ oneof value {
+ // A variant annotation, which describes the effect of a variant on the
+ // genome, the coding sequence, and/or higher level consequences at the
+ // organism level e.g. pathogenicity. This field is only set for annotations
+ // of type `VARIANT`.
+ VariantAnnotation variant = 10;
+
+ // A transcript value represents the assertion that a particular region of
+ // the reference genome may be transcribed as RNA. An alternative splicing
+ // pattern would be represented as a separate transcript object. This field
+ // is only set for annotations of type `TRANSCRIPT`.
+ Transcript transcript = 11;
+ }
+
+  // A map of additional annotation information. This must be of the form
+  // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 12;
+}
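
Editor's note: the Annotation message uses 0-based half-open coordinates ([start, end)) and an `info` map of string keys to lists of string values. A sketch of the JSON body one might pass when creating a GENE annotation; the IDs are placeholders and field names assume the standard proto3 JSON mapping (the API may render int64 fields as strings).

    # A GENE annotation covering bases 100..199 on chr1 (end is exclusive).
    annotation = {
        "annotationSetId": "my-annotation-set-id",   # placeholder ID
        "name": "exampleGene",
        "referenceName": "chr1",
        "start": 100,            # 0-based inclusive
        "end": 200,              # 0-based exclusive
        "reverseStrand": False,
        "type": "GENE",
        # info values are lists of strings, per the map<string, ListValue> contract.
        "info": {"source": ["manual-curation"], "score": ["17"]},
    }
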
+
+message VariantAnnotation {
+ message ClinicalCondition {
+ // A set of names for the condition.
+ repeated string names = 1;
+
+ // The set of external IDs for this condition.
+ repeated ExternalId external_ids = 2;
+
+ // The MedGen concept id associated with this gene.
+ // Search for these IDs at http://www.ncbi.nlm.nih.gov/medgen/
+ string concept_id = 3;
+
+ // The OMIM id for this condition.
+ // Search for these IDs at http://omim.org/
+ string omim_id = 4;
+ }
+
+ enum Type {
+ TYPE_UNSPECIFIED = 0;
+
+ // `TYPE_OTHER` should be used when no other Type will suffice.
+ // Further explanation of the variant type may be included in the
+ // [info][google.genomics.v1.Annotation.info] field.
+ TYPE_OTHER = 1;
+
+ // `INSERTION` indicates an insertion.
+ INSERTION = 2;
+
+ // `DELETION` indicates a deletion.
+ DELETION = 3;
+
+ // `SUBSTITUTION` indicates a block substitution of
+ // two or more nucleotides.
+ SUBSTITUTION = 4;
+
+ // `SNP` indicates a single nucleotide polymorphism.
+ SNP = 5;
+
+ // `STRUCTURAL` indicates a large structural variant,
+ // including chromosomal fusions, inversions, etc.
+ STRUCTURAL = 6;
+
+ // `CNV` indicates a variation in copy number.
+ CNV = 7;
+ }
+
+ enum Effect {
+ EFFECT_UNSPECIFIED = 0;
+
+ // `EFFECT_OTHER` should be used when no other Effect
+ // will suffice.
+ EFFECT_OTHER = 1;
+
+ // `FRAMESHIFT` indicates a mutation in which the insertion or
+ // deletion of nucleotides resulted in a frameshift change.
+ FRAMESHIFT = 2;
+
+ // `FRAME_PRESERVING_INDEL` indicates a mutation in which a
+ // multiple of three nucleotides has been inserted or deleted, resulting
+ // in no change to the reading frame of the coding sequence.
+ FRAME_PRESERVING_INDEL = 3;
+
+ // `SYNONYMOUS_SNP` indicates a single nucleotide polymorphism
+ // mutation that results in no amino acid change.
+ SYNONYMOUS_SNP = 4;
+
+ // `NONSYNONYMOUS_SNP` indicates a single nucleotide
+ // polymorphism mutation that results in an amino acid change.
+ NONSYNONYMOUS_SNP = 5;
+
+ // `STOP_GAIN` indicates a mutation that leads to the creation
+ // of a stop codon at the variant site. Frameshift mutations creating
+ // downstream stop codons do not count as `STOP_GAIN`.
+ STOP_GAIN = 6;
+
+ // `STOP_LOSS` indicates a mutation that eliminates a
+ // stop codon at the variant site.
+ STOP_LOSS = 7;
+
+ // `SPLICE_SITE_DISRUPTION` indicates that this variant is
+ // found in a splice site for the associated transcript, and alters the
+ // normal splicing pattern.
+ SPLICE_SITE_DISRUPTION = 8;
+ }
+
+ enum ClinicalSignificance {
+ CLINICAL_SIGNIFICANCE_UNSPECIFIED = 0;
+
+ // `OTHER` should be used when no other clinical significance
+ // value will suffice.
+ CLINICAL_SIGNIFICANCE_OTHER = 1;
+
+ UNCERTAIN = 2;
+
+ BENIGN = 3;
+
+ LIKELY_BENIGN = 4;
+
+ LIKELY_PATHOGENIC = 5;
+
+ PATHOGENIC = 6;
+
+ DRUG_RESPONSE = 7;
+
+ HISTOCOMPATIBILITY = 8;
+
+ CONFERS_SENSITIVITY = 9;
+
+ RISK_FACTOR = 10;
+
+ ASSOCIATION = 11;
+
+ PROTECTIVE = 12;
+
+    // `MULTIPLE_REPORTED` should be used when multiple clinical
+    // significances are reported for a variant. The original clinical
+    // significance values may be provided in the `info` field.
+ MULTIPLE_REPORTED = 13;
+ }
+
+ // Type has been adapted from ClinVar's list of variant types.
+ Type type = 1;
+
+ // Effect of the variant on the coding sequence.
+ Effect effect = 2;
+
+ // The alternate allele for this variant. If multiple alternate alleles
+ // exist at this location, create a separate variant for each one, as they
+ // may represent distinct conditions.
+ string alternate_bases = 3;
+
+ // Google annotation ID of the gene affected by this variant. This should
+ // be provided when the variant is created.
+ string gene_id = 4;
+
+ // Google annotation IDs of the transcripts affected by this variant. These
+ // should be provided when the variant is created.
+ repeated string transcript_ids = 5;
+
+ // The set of conditions associated with this variant.
+ // A condition describes the way a variant influences human health.
+ repeated ClinicalCondition conditions = 6;
+
+ // Describes the clinical significance of a variant.
+ // It is adapted from the ClinVar controlled vocabulary for clinical
+ // significance described at:
+ // http://www.ncbi.nlm.nih.gov/clinvar/docs/clinsig/
+ ClinicalSignificance clinical_significance = 7;
+}
+
+// A transcript represents the assertion that a particular region of the
+// reference genome may be transcribed as RNA.
+message Transcript {
+ message Exon {
+ // The start position of the exon on this annotation's reference sequence,
+ // 0-based inclusive. Note that this is relative to the reference start, and
+ // **not** the containing annotation start.
+ int64 start = 1;
+
+ // The end position of the exon on this annotation's reference sequence,
+ // 0-based exclusive. Note that this is relative to the reference start, and
+ // *not* the containing annotation start.
+ int64 end = 2;
+
+ // The frame of this exon. Contains a value of 0, 1, or 2, which indicates
+ // the offset of the first coding base of the exon within the reading frame
+ // of the coding DNA sequence, if any. This field is dependent on the
+ // strandedness of this annotation (see
+ // [Annotation.reverse_strand][google.genomics.v1.Annotation.reverse_strand]).
+ // For forward stranded annotations, this offset is relative to the
+ // [exon.start][google.genomics.v1.Transcript.Exon.start]. For reverse
+ // strand annotations, this offset is relative to the
+ // [exon.end][google.genomics.v1.Transcript.Exon.end] `- 1`.
+ //
+ // Unset if this exon does not intersect the coding sequence. Upon creation
+ // of a transcript, the frame must be populated for all or none of the
+ // coding exons.
+ google.protobuf.Int32Value frame = 3;
+ }
+
+ message CodingSequence {
+ // The start of the coding sequence on this annotation's reference sequence,
+ // 0-based inclusive. Note that this position is relative to the reference
+ // start, and *not* the containing annotation start.
+ int64 start = 1;
+
+ // The end of the coding sequence on this annotation's reference sequence,
+ // 0-based exclusive. Note that this position is relative to the reference
+ // start, and *not* the containing annotation start.
+ int64 end = 2;
+ }
+
+ // The annotation ID of the gene from which this transcript is transcribed.
+ string gene_id = 1;
+
+ // The <a href="http://en.wikipedia.org/wiki/Exon">exons</a> that compose
+ // this transcript. This field should be unset for genomes where transcript
+ // splicing does not occur, for example prokaryotes.
+ //
+ // Introns are regions of the transcript that are not included in the
+ // spliced RNA product. Though not explicitly modeled here, intron ranges can
+ // be deduced; all regions of this transcript that are not exons are introns.
+ //
+ // Exonic sequences do not necessarily code for a translational product
+ // (amino acids). Only the regions of exons bounded by the
+ // [codingSequence][google.genomics.v1.Transcript.coding_sequence] correspond
+ // to coding DNA sequence.
+ //
+ // Exons are ordered by start position and may not overlap.
+ repeated Exon exons = 2;
+
+ // The range of the coding sequence for this transcript, if any. To determine
+ // the exact ranges of coding sequence, intersect this range with those of the
+ // [exons][google.genomics.v1.Transcript.exons], if any. If there are any
+ // [exons][google.genomics.v1.Transcript.exons], the
+ // [codingSequence][google.genomics.v1.Transcript.coding_sequence] must start
+ // and end within them.
+ //
+ // Note that in some cases, the reference genome will not exactly match the
+ // observed mRNA transcript e.g. due to variance in the source genome from
+ // reference. In these cases,
+ // [exon.frame][google.genomics.v1.Transcript.Exon.frame] will not necessarily
+ // match the expected reference reading frame and coding exon reference bases
+ // cannot necessarily be concatenated to produce the original transcript mRNA.
+ CodingSequence coding_sequence = 3;
+}
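
Editor's note: the comments above say the exact coding ranges are obtained by intersecting codingSequence with the exon ranges. A small sketch of that half-open interval intersection, assuming the transcript is a dict with numeric start/end values (convert from strings first if taken directly from the JSON API).

    def intersect(a_start, a_end, b_start, b_end):
        """Intersection of two 0-based half-open ranges, or None if disjoint."""
        start, end = max(a_start, b_start), min(a_end, b_end)
        return (start, end) if start < end else None

    def coding_ranges(transcript):
        """Coding portions of each exon, as (start, end) half-open tuples."""
        cds = transcript["codingSequence"]
        ranges = []
        for exon in transcript.get("exons", []):
            overlap = intersect(exon["start"], exon["end"], cds["start"], cds["end"])
            if overlap:
                ranges.append(overlap)
        return ranges
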
+
+message ExternalId {
+ // The name of the source of this data.
+ string source_name = 1;
+
+ // The id used by the source of this data.
+ string id = 2;
+}
+
+message CreateAnnotationSetRequest {
+ // The annotation set to create.
+ AnnotationSet annotation_set = 1;
+}
+
+message GetAnnotationSetRequest {
+ // The ID of the annotation set to be retrieved.
+ string annotation_set_id = 1;
+}
+
+message UpdateAnnotationSetRequest {
+ // The ID of the annotation set to be updated.
+ string annotation_set_id = 1;
+
+ // The new annotation set.
+ AnnotationSet annotation_set = 2;
+
+ // An optional mask specifying which fields to update. Mutable fields are
+ // [name][google.genomics.v1.AnnotationSet.name],
+ // [source_uri][google.genomics.v1.AnnotationSet.source_uri], and
+ // [info][google.genomics.v1.AnnotationSet.info]. If unspecified, all
+ // mutable fields will be updated.
+ google.protobuf.FieldMask update_mask = 3;
+}
+
+message DeleteAnnotationSetRequest {
+ // The ID of the annotation set to be deleted.
+ string annotation_set_id = 1;
+}
+
+message SearchAnnotationSetsRequest {
+ // Required. The dataset IDs to search within. Caller must have `READ` access
+ // to these datasets.
+ repeated string dataset_ids = 1;
+
+ // If specified, only annotation sets associated with the given reference set
+ // are returned.
+ string reference_set_id = 2;
+
+  // Only return annotation sets for which a substring of the name matches this
+  // string (case insensitive).
+ string name = 3;
+
+ // If specified, only annotation sets that have any of these types are
+ // returned.
+ repeated AnnotationType types = 4;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 5;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 128. The maximum value is 1024.
+ int32 page_size = 6;
+}
+
+message SearchAnnotationSetsResponse {
+ // The matching annotation sets.
+ repeated AnnotationSet annotation_sets = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+message CreateAnnotationRequest {
+ // The annotation to be created.
+ Annotation annotation = 1;
+}
+
+message BatchCreateAnnotationsRequest {
+ // The annotations to be created. At most 4096 can be specified in a single
+ // request.
+ repeated Annotation annotations = 1;
+
+ // A unique request ID which enables the server to detect duplicated requests.
+ // If provided, duplicated requests will result in the same response; if not
+ // provided, duplicated requests may result in duplicated data. For a given
+ // annotation set, callers should not reuse `request_id`s when writing
+ // different batches of annotations - behavior in this case is undefined.
+ // A common approach is to use a UUID. For batch jobs where worker crashes are
+ // a possibility, consider using some unique variant of a worker or run ID.
+ string request_id = 2;
+}
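
Editor's note: `request_id` is what makes a retried batch idempotent. A sketch that chunks annotations into batches of at most 4096 and attaches a per-batch UUID, under the same assumed endpoint and auth handling as the earlier search sketch.

    import uuid
    import requests

    API = "https://genomics.googleapis.com/v1"  # assumed endpoint

    def batch_create(token, annotations, batch_size=4096):
        headers = {"Authorization": "Bearer " + token}
        for i in range(0, len(annotations), batch_size):
            batch = annotations[i:i + batch_size]
            body = {
                "annotations": batch,
                # Reuse this same ID only when retrying this exact batch; never
                # reuse it for a different batch of annotations.
                "requestId": str(uuid.uuid4()),
            }
            resp = requests.post(API + "/annotations:batchCreate", json=body, headers=headers)
            resp.raise_for_status()
            for entry in resp.json().get("entries", []):
                status = entry.get("status", {})
                if status.get("code", 0) != 0:   # non-OK google.rpc.Status
                    print("failed:", status)
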
+
+message BatchCreateAnnotationsResponse {
+ message Entry {
+ // The creation status.
+ google.rpc.Status status = 1;
+
+ // The created annotation, if creation was successful.
+ Annotation annotation = 2;
+ }
+
+ // The resulting per-annotation entries, ordered consistently with the
+ // original request.
+ repeated Entry entries = 1;
+}
+
+message GetAnnotationRequest {
+ // The ID of the annotation to be retrieved.
+ string annotation_id = 1;
+}
+
+message UpdateAnnotationRequest {
+ // The ID of the annotation to be updated.
+ string annotation_id = 1;
+
+ // The new annotation.
+ Annotation annotation = 2;
+
+ // An optional mask specifying which fields to update. Mutable fields are
+ // [name][google.genomics.v1.Annotation.name],
+ // [variant][google.genomics.v1.Annotation.variant],
+ // [transcript][google.genomics.v1.Annotation.transcript], and
+ // [info][google.genomics.v1.Annotation.info]. If unspecified, all mutable
+ // fields will be updated.
+ google.protobuf.FieldMask update_mask = 3;
+}
+
+message DeleteAnnotationRequest {
+ // The ID of the annotation to be deleted.
+ string annotation_id = 1;
+}
+
+message SearchAnnotationsRequest {
+ // Required. The annotation sets to search within. The caller must have
+ // `READ` access to these annotation sets.
+ // All queried annotation sets must have the same type.
+ repeated string annotation_set_ids = 1;
+
+ // Required. `reference_id` or `reference_name` must be set.
+ oneof reference {
+ // The ID of the reference to query.
+ string reference_id = 2;
+
+ // The name of the reference to query, within the reference set associated
+ // with this query.
+ string reference_name = 3;
+ }
+
+ // The start position of the range on the reference, 0-based inclusive. If
+ // specified,
+ // [referenceId][google.genomics.v1.SearchAnnotationsRequest.reference_id] or
+ // [referenceName][google.genomics.v1.SearchAnnotationsRequest.reference_name]
+ // must be specified. Defaults to 0.
+ int64 start = 4;
+
+  // The end position of the range on the reference, 0-based exclusive. If
+  // specified,
+  // [referenceId][google.genomics.v1.SearchAnnotationsRequest.reference_id] or
+  // [referenceName][google.genomics.v1.SearchAnnotationsRequest.reference_name]
+  // must also be specified. Defaults to the length of the reference.
+ int64 end = 5;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 6;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 256. The maximum value is 2048.
+ int32 page_size = 7;
+}
+
+message SearchAnnotationsResponse {
+ // The matching annotations.
+ repeated Annotation annotations = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+// When an [Annotation][google.genomics.v1.Annotation] or
+// [AnnotationSet][google.genomics.v1.AnnotationSet] is created, if `type` is
+// not specified it will be set to `GENERIC`.
+enum AnnotationType {
+ ANNOTATION_TYPE_UNSPECIFIED = 0;
+
+ // A `GENERIC` annotation type should be used when no other annotation
+ // type will suffice. This represents an untyped annotation of the reference
+ // genome.
+ GENERIC = 1;
+
+ // A `VARIANT` annotation type.
+ VARIANT = 2;
+
+ // A `GENE` annotation type represents the existence of a gene at the
+ // associated reference coordinates. The start coordinate is typically the
+ // gene's transcription start site and the end is typically the end of the
+ // gene's last exon.
+ GENE = 3;
+
+ // A `TRANSCRIPT` annotation type represents the assertion that a
+ // particular region of the reference genome may be transcribed as RNA.
+ TRANSCRIPT = 4;
+}
diff --git a/third_party/googleapis/google/genomics/v1/cigar.proto b/third_party/googleapis/google/genomics/v1/cigar.proto
new file mode 100644
index 0000000000..e70e943877
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/cigar.proto
@@ -0,0 +1,99 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "CigarProto";
+option java_package = "com.google.genomics.v1";
+
+
+// A single CIGAR operation.
+message CigarUnit {
+ // Describes the different types of CIGAR alignment operations that exist.
+ // Used wherever CIGAR alignments are used.
+ enum Operation {
+ OPERATION_UNSPECIFIED = 0;
+
+ // An alignment match indicates that a sequence can be aligned to the
+ // reference without evidence of an INDEL. Unlike the
+ // `SEQUENCE_MATCH` and `SEQUENCE_MISMATCH` operators,
+ // the `ALIGNMENT_MATCH` operator does not indicate whether the
+ // reference and read sequences are an exact match. This operator is
+ // equivalent to SAM's `M`.
+ ALIGNMENT_MATCH = 1;
+
+ // The insert operator indicates that the read contains evidence of bases
+ // being inserted into the reference. This operator is equivalent to SAM's
+ // `I`.
+ INSERT = 2;
+
+ // The delete operator indicates that the read contains evidence of bases
+ // being deleted from the reference. This operator is equivalent to SAM's
+ // `D`.
+ DELETE = 3;
+
+ // The skip operator indicates that this read skips a long segment of the
+ // reference, but the bases have not been deleted. This operator is commonly
+ // used when working with RNA-seq data, where reads may skip long segments
+ // of the reference between exons. This operator is equivalent to SAM's
+ // `N`.
+ SKIP = 4;
+
+ // The soft clip operator indicates that bases at the start/end of a read
+ // have not been considered during alignment. This may occur if the majority
+ // of a read maps, except for low quality bases at the start/end of a read.
+ // This operator is equivalent to SAM's `S`. Bases that are soft
+ // clipped will still be stored in the read.
+ CLIP_SOFT = 5;
+
+ // The hard clip operator indicates that bases at the start/end of a read
+ // have been omitted from this alignment. This may occur if this linear
+ // alignment is part of a chimeric alignment, or if the read has been
+ // trimmed (for example, during error correction or to trim poly-A tails for
+ // RNA-seq). This operator is equivalent to SAM's `H`.
+ CLIP_HARD = 6;
+
+ // The pad operator indicates that there is padding in an alignment. This
+ // operator is equivalent to SAM's `P`.
+ PAD = 7;
+
+ // This operator indicates that this portion of the aligned sequence exactly
+ // matches the reference. This operator is equivalent to SAM's `=`.
+ SEQUENCE_MATCH = 8;
+
+ // This operator indicates that this portion of the aligned sequence is an
+ // alignment match to the reference, but a sequence mismatch. This can
+ // indicate a SNP or a read error. This operator is equivalent to SAM's
+ // `X`.
+ SEQUENCE_MISMATCH = 9;
+ }
+
+ Operation operation = 1;
+
+ // The number of genomic bases that the operation runs for. Required.
+ int64 operation_length = 2;
+
+ // `referenceSequence` is only used at mismatches
+ // (`SEQUENCE_MISMATCH`) and deletions (`DELETE`).
+ // Filling this field replaces SAM's MD tag. If the relevant information is
+ // not available, this field is unset.
+ string reference_sequence = 3;
+}
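
Editor's note: per the SAM equivalences in the comments above, ALIGNMENT_MATCH (M), DELETE (D), SKIP (N), SEQUENCE_MATCH (=) and SEQUENCE_MISMATCH (X) advance along the reference, while insertions, clips and pads do not. A small helper computing the reference span covered by a list of CigarUnit values (as JSON dicts).

    # CIGAR operations that advance the position on the reference sequence,
    # mirroring SAM's M, D, N, = and X operators.
    REFERENCE_CONSUMING = {
        "ALIGNMENT_MATCH", "DELETE", "SKIP", "SEQUENCE_MATCH", "SEQUENCE_MISMATCH",
    }

    def reference_span(cigar):
        """Number of reference bases covered by a list of CigarUnit dicts."""
        return sum(int(unit["operationLength"])
                   for unit in cigar
                   if unit["operation"] in REFERENCE_CONSUMING)
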
diff --git a/third_party/googleapis/google/genomics/v1/datasets.proto b/third_party/googleapis/google/genomics/v1/datasets.proto
new file mode 100644
index 0000000000..d312f3d1cc
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/datasets.proto
@@ -0,0 +1,212 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "DatasetsProto";
+option java_package = "com.google.genomics.v1";
+
+
+// This service manages datasets, which are collections of genomic data.
+service DatasetServiceV1 {
+ // Lists datasets within a project.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc ListDatasets(ListDatasetsRequest) returns (ListDatasetsResponse) {
+ option (google.api.http) = { get: "/v1/datasets" };
+ }
+
+ // Creates a new dataset.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc CreateDataset(CreateDatasetRequest) returns (Dataset) {
+ option (google.api.http) = { post: "/v1/datasets" body: "dataset" };
+ }
+
+ // Gets a dataset by ID.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc GetDataset(GetDatasetRequest) returns (Dataset) {
+ option (google.api.http) = { get: "/v1/datasets/{dataset_id}" };
+ }
+
+ // Updates a dataset.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // This method supports patch semantics.
+ rpc UpdateDataset(UpdateDatasetRequest) returns (Dataset) {
+ option (google.api.http) = { patch: "/v1/datasets/{dataset_id}" body: "dataset" };
+ }
+
+  // Deletes a dataset and all of its contents (all read group sets,
+  // reference sets, variant sets, call sets, annotation sets, etc.).
+  // This is reversible (up to one week after the deletion) via
+ // the
+ // [datasets.undelete][google.genomics.v1.DatasetServiceV1.UndeleteDataset]
+ // operation.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc DeleteDataset(DeleteDatasetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/datasets/{dataset_id}" };
+ }
+
+ // Undeletes a dataset by restoring a dataset which was deleted via this API.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // This operation is only possible for a week after the deletion occurred.
+ rpc UndeleteDataset(UndeleteDatasetRequest) returns (Dataset) {
+ option (google.api.http) = { post: "/v1/datasets/{dataset_id}:undelete" body: "*" };
+ }
+
+ // Sets the access control policy on the specified dataset. Replaces any
+ // existing policy.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // See <a href="/iam/docs/managing-policies#setting_a_policy">Setting a
+ // Policy</a> for more information.
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=datasets/*}:setIamPolicy" body: "*" };
+ }
+
+ // Gets the access control policy for the dataset. This is empty if the
+ // policy or resource does not exist.
+ //
+ // See <a href="/iam/docs/managing-policies#getting_a_policy">Getting a
+ // Policy</a> for more information.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=datasets/*}:getIamPolicy" body: "*" };
+ }
+
+ // Returns permissions that a caller has on the specified resource.
+ // See <a href="/iam/docs/managing-policies#testing_permissions">Testing
+ // Permissions</a> for more information.
+ //
+ // For the definitions of datasets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) {
+ option (google.api.http) = { post: "/v1/{resource=datasets/*}:testIamPermissions" body: "*" };
+ }
+}
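
Editor's note: dataset deletion is reversible for up to one week via datasets.undelete. A minimal sketch of both REST calls defined above, with the same assumed endpoint and bearer-token handling as the earlier sketches.

    import requests

    API = "https://genomics.googleapis.com/v1"  # assumed endpoint

    def delete_dataset(token, dataset_id):
        headers = {"Authorization": "Bearer " + token}
        requests.delete(API + "/datasets/" + dataset_id, headers=headers).raise_for_status()

    def undelete_dataset(token, dataset_id):
        """Restores a deleted dataset; only possible for about a week after deletion."""
        headers = {"Authorization": "Bearer " + token}
        resp = requests.post(API + "/datasets/" + dataset_id + ":undelete",
                             json={}, headers=headers)
        resp.raise_for_status()
        return resp.json()   # the restored Dataset
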
+
+// A Dataset is a collection of genomic data.
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+message Dataset {
+ // The server-generated dataset ID, unique across all datasets.
+ string id = 1;
+
+ // The Google Cloud project ID that this dataset belongs to.
+ string project_id = 2;
+
+ // The dataset name.
+ string name = 3;
+
+ // The time this dataset was created, in seconds from the epoch.
+ google.protobuf.Timestamp create_time = 4;
+}
+
+// The dataset list request.
+message ListDatasetsRequest {
+ // Required. The Google Cloud project ID to list datasets for.
+ string project_id = 1;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 50. The maximum value is 1024.
+ int32 page_size = 2;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 3;
+}
+
+// The dataset list response.
+message ListDatasetsResponse {
+ // The list of matching Datasets.
+ repeated Dataset datasets = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+message CreateDatasetRequest {
+ // The dataset to be created. Must contain projectId and name.
+ Dataset dataset = 1;
+}
+
+message UpdateDatasetRequest {
+ // The ID of the dataset to be updated.
+ string dataset_id = 1;
+
+ // The new dataset data.
+ Dataset dataset = 2;
+
+ // An optional mask specifying which fields to update. At this time, the only
+ // mutable field is [name][google.genomics.v1.Dataset.name]. The only
+ // acceptable value is "name". If unspecified, all mutable fields will be
+ // updated.
+ google.protobuf.FieldMask update_mask = 3;
+}
+
+message DeleteDatasetRequest {
+ // The ID of the dataset to be deleted.
+ string dataset_id = 1;
+}
+
+message UndeleteDatasetRequest {
+ // The ID of the dataset to be undeleted.
+ string dataset_id = 1;
+}
+
+message GetDatasetRequest {
+ // The ID of the dataset.
+ string dataset_id = 1;
+}
diff --git a/third_party/googleapis/google/genomics/v1/operations.proto b/third_party/googleapis/google/genomics/v1/operations.proto
new file mode 100644
index 0000000000..12bf7d2a36
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/operations.proto
@@ -0,0 +1,77 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "OperationsProto";
+option java_package = "com.google.genomics.v1";
+
+
+// Metadata describing an [Operation][google.longrunning.Operation].
+message OperationMetadata {
+ // The Google Cloud Project in which the job is scoped.
+ string project_id = 1;
+
+ // The time at which the job was submitted to the Genomics service.
+ google.protobuf.Timestamp create_time = 2;
+
+ // The time at which the job began to run.
+ google.protobuf.Timestamp start_time = 3;
+
+ // The time at which the job stopped running.
+ google.protobuf.Timestamp end_time = 4;
+
+  // The original request that started the operation. Note that this will be in
+  // the current version of the API. If the operation was started with the
+  // v1beta2 API and a GetOperation is performed on the v1 API, a v1 request
+  // will be returned.
+ google.protobuf.Any request = 5;
+
+ // Optional event messages that were generated during the job's execution.
+ // This also contains any warnings that were generated during import
+ // or export.
+ repeated OperationEvent events = 6;
+
+ // This field is deprecated. Use `labels` instead. Optionally provided by the
+ // caller when submitting the request that creates the operation.
+ string client_id = 7;
+
+ // Runtime metadata on this Operation.
+ google.protobuf.Any runtime_metadata = 8;
+
+ // Optionally provided by the caller when submitting the request that creates
+ // the operation.
+ map<string, string> labels = 9;
+}
+
+// An event that occurred during an [Operation][google.longrunning.Operation].
+message OperationEvent {
+  // Optional time when the event started.
+ google.protobuf.Timestamp start_time = 1;
+
+  // Optional time when the event finished. An event can have a start time and
+  // no finish time. If an event has a finish time, there must also be a start
+  // time.
+ google.protobuf.Timestamp end_time = 2;
+
+ // Required description of event.
+ string description = 3;
+}
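
Editor's note: this metadata travels inside a long-running Operation. A sketch of polling such an operation and printing its events, assuming the service exposes the standard google.longrunning GetOperation method at GET /v1/{operation name}; that binding is defined elsewhere, not in this file, so treat the URL shape as an assumption.

    import time
    import requests

    API = "https://genomics.googleapis.com/v1"  # assumed endpoint

    def wait_for_operation(token, name, poll_seconds=10):
        """Poll a long-running operation (name like 'operations/...') until done."""
        headers = {"Authorization": "Bearer " + token}
        while True:
            resp = requests.get(API + "/" + name, headers=headers)
            resp.raise_for_status()
            op = resp.json()
            metadata = op.get("metadata", {})        # OperationMetadata payload
            for event in metadata.get("events", []):
                print(event.get("description", ""))
            if op.get("done"):
                return op
            time.sleep(poll_seconds)
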
diff --git a/third_party/googleapis/google/genomics/v1/position.proto b/third_party/googleapis/google/genomics/v1/position.proto
new file mode 100644
index 0000000000..b4e2403d6c
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/position.proto
@@ -0,0 +1,42 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "PositionProto";
+option java_package = "com.google.genomics.v1";
+
+
+// An abstraction for referring to a genomic position, in relation to some
+// already known reference. For now, represents a genomic position as a
+// reference name, a base number on that reference (0-based), and a
+// determination of forward or reverse strand.
+message Position {
+ // The name of the reference in whatever reference set is being used.
+ string reference_name = 1;
+
+ // The 0-based offset from the start of the forward strand for that reference.
+ int64 position = 2;
+
+ // Whether this position is on the reverse strand, as opposed to the forward
+ // strand.
+ bool reverse_strand = 3;
+}
diff --git a/third_party/googleapis/google/genomics/v1/range.proto b/third_party/googleapis/google/genomics/v1/range.proto
new file mode 100644
index 0000000000..4f2a454050
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/range.proto
@@ -0,0 +1,39 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "RangeProto";
+option java_package = "com.google.genomics.v1";
+
+
+// A 0-based half-open genomic coordinate range for search requests.
+message Range {
+ // The reference sequence name, for example `chr1`,
+ // `1`, or `chrX`.
+ string reference_name = 1;
+
+ // The start position of the range on the reference, 0-based inclusive.
+ int64 start = 2;
+
+ // The end position of the range on the reference, 0-based exclusive.
+ int64 end = 3;
+}
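
Editor's note: the same 0-based half-open convention is used for every ranged query in this API; two ranges on the same reference overlap exactly when each starts before the other ends. A one-function sketch over Range-shaped dicts.

    def ranges_overlap(a, b):
        """True if two Range dicts on the same reference share at least one base."""
        return (a["referenceName"] == b["referenceName"]
                and int(a["start"]) < int(b["end"])
                and int(b["start"]) < int(a["end"]))
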
diff --git a/third_party/googleapis/google/genomics/v1/readalignment.proto b/third_party/googleapis/google/genomics/v1/readalignment.proto
new file mode 100644
index 0000000000..c505584c54
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/readalignment.proto
@@ -0,0 +1,221 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/genomics/v1/cigar.proto";
+import "google/genomics/v1/position.proto";
+import "google/protobuf/struct.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "ReadAlignmentProto";
+option java_package = "com.google.genomics.v1";
+
+
+// A linear alignment can be represented by one CIGAR string. Describes the
+// mapped position and local alignment of the read to the reference.
+message LinearAlignment {
+ // The position of this alignment.
+ Position position = 1;
+
+ // The mapping quality of this alignment. Represents how likely
+ // the read maps to this position as opposed to other locations.
+ //
+ // Specifically, this is -10 log10 Pr(mapping position is wrong), rounded to
+ // the nearest integer.
+ int32 mapping_quality = 2;
+
+ // Represents the local alignment of this sequence (alignment matches, indels,
+ // etc) against the reference.
+ repeated CigarUnit cigar = 3;
+}
+
+// A read alignment describes a linear alignment of a string of DNA to a
+// [reference sequence][google.genomics.v1.Reference], in addition to metadata
+// about the fragment (the molecule of DNA sequenced) and the read (the bases
+// which were read by the sequencer). A read is equivalent to a line in a SAM
+// file. A read belongs to exactly one read group and exactly one
+// [read group set][google.genomics.v1.ReadGroupSet].
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+//
+// ### Reverse-stranded reads
+//
+// Mapped reads (reads having a non-null `alignment`) can be aligned to either
+// the forward or the reverse strand of their associated reference. Strandedness
+// of a mapped read is encoded by `alignment.position.reverseStrand`.
+//
+// If we consider the reference to be a forward-stranded coordinate space of
+// `[0, reference.length)` with `0` as the left-most position and
+// `reference.length` as the right-most position, reads are always aligned left
+// to right. That is, `alignment.position.position` always refers to the
+// left-most reference coordinate and `alignment.cigar` describes the alignment
+// of this read to the reference from left to right. All per-base fields such as
+// `alignedSequence` and `alignedQuality` share this same left-to-right
+// orientation; this is true of reads which are aligned to either strand. For
+// reverse-stranded reads, this means that `alignedSequence` is the reverse
+// complement of the bases that were originally reported by the sequencing
+// machine.
+//
+// ### Generating a reference-aligned sequence string
+//
+// When interacting with mapped reads, it's often useful to produce a string
+// representing the local alignment of the read to reference. The following
+// pseudocode demonstrates one way of doing this:
+//
+// out = ""
+// offset = 0
+// for c in read.alignment.cigar {
+// switch c.operation {
+// case "ALIGNMENT_MATCH", "SEQUENCE_MATCH", "SEQUENCE_MISMATCH":
+// out += read.alignedSequence[offset:offset+c.operationLength]
+// offset += c.operationLength
+// break
+// case "CLIP_SOFT", "INSERT":
+// offset += c.operationLength
+// break
+// case "PAD":
+// out += repeat("*", c.operationLength)
+// break
+// case "DELETE":
+// out += repeat("-", c.operationLength)
+// break
+// case "SKIP":
+// out += repeat(" ", c.operationLength)
+// break
+// case "CLIP_HARD":
+// break
+// }
+// }
+// return out
+//
+// ### Converting to SAM's CIGAR string
+//
+// The following pseudocode generates a SAM CIGAR string from the
+// `cigar` field. Note that this is a lossy conversion
+// (`cigar.referenceSequence` is lost).
+//
+// cigarMap = {
+// "ALIGNMENT_MATCH": "M",
+// "INSERT": "I",
+// "DELETE": "D",
+// "SKIP": "N",
+// "CLIP_SOFT": "S",
+// "CLIP_HARD": "H",
+// "PAD": "P",
+// "SEQUENCE_MATCH": "=",
+// "SEQUENCE_MISMATCH": "X",
+// }
+// cigarStr = ""
+// for c in read.alignment.cigar {
+// cigarStr += c.operationLength + cigarMap[c.operation]
+// }
+// return cigarStr
+message Read {
+ // The server-generated read ID, unique across all reads. This is different
+ // from the `fragmentName`.
+ string id = 1;
+
+ // The ID of the read group this read belongs to. A read belongs to exactly
+ // one read group. This is a server-generated ID which is distinct from SAM's
+ // RG tag (for that value, see
+ // [ReadGroup.name][google.genomics.v1.ReadGroup.name]).
+ string read_group_id = 2;
+
+ // The ID of the read group set this read belongs to. A read belongs to
+ // exactly one read group set.
+ string read_group_set_id = 3;
+
+ // The fragment name. Equivalent to QNAME (query template name) in SAM.
+ string fragment_name = 4;
+
+ // The orientation and the distance between reads from the fragment are
+ // consistent with the sequencing protocol (SAM flag 0x2).
+ bool proper_placement = 5;
+
+ // The fragment is a PCR or optical duplicate (SAM flag 0x400).
+ bool duplicate_fragment = 6;
+
+ // The observed length of the fragment, equivalent to TLEN in SAM.
+ int32 fragment_length = 7;
+
+  // The read number in sequencing. 0-based and less than numberReads. This
+  // field replaces SAM flags 0x40 and 0x80.
+ int32 read_number = 8;
+
+ // The number of reads in the fragment (extension to SAM flag 0x1).
+ int32 number_reads = 9;
+
+ // Whether this read did not pass filters, such as platform or vendor quality
+ // controls (SAM flag 0x200).
+ bool failed_vendor_quality_checks = 10;
+
+ // The linear alignment for this alignment record. This field is null for
+ // unmapped reads.
+ LinearAlignment alignment = 11;
+
+ // Whether this alignment is secondary. Equivalent to SAM flag 0x100.
+ // A secondary alignment represents an alternative to the primary alignment
+ // for this read. Aligners may return secondary alignments if a read can map
+ // ambiguously to multiple coordinates in the genome. By convention, each read
+ // has one and only one alignment where both `secondaryAlignment`
+ // and `supplementaryAlignment` are false.
+ bool secondary_alignment = 12;
+
+ // Whether this alignment is supplementary. Equivalent to SAM flag 0x800.
+ // Supplementary alignments are used in the representation of a chimeric
+ // alignment. In a chimeric alignment, a read is split into multiple
+ // linear alignments that map to different reference contigs. The first
+ // linear alignment in the read will be designated as the representative
+ // alignment; the remaining linear alignments will be designated as
+ // supplementary alignments. These alignments may have different mapping
+ // quality scores. In each linear alignment in a chimeric alignment, the read
+ // will be hard clipped. The `alignedSequence` and
+ // `alignedQuality` fields in the alignment record will only
+ // represent the bases for its respective linear alignment.
+ bool supplementary_alignment = 13;
+
+ // The bases of the read sequence contained in this alignment record,
+ // **without CIGAR operations applied** (equivalent to SEQ in SAM).
+ // `alignedSequence` and `alignedQuality` may be
+ // shorter than the full read sequence and quality. This will occur if the
+ // alignment is part of a chimeric alignment, or if the read was trimmed. When
+ // this occurs, the CIGAR for this read will begin/end with a hard clip
+ // operator that will indicate the length of the excised sequence.
+ string aligned_sequence = 14;
+
+ // The quality of the read sequence contained in this alignment record
+ // (equivalent to QUAL in SAM).
+ // `alignedSequence` and `alignedQuality` may be shorter than the full read
+ // sequence and quality. This will occur if the alignment is part of a
+ // chimeric alignment, or if the read was trimmed. When this occurs, the CIGAR
+ // for this read will begin/end with a hard clip operator that will indicate
+ // the length of the excised sequence.
+ repeated int32 aligned_quality = 15;
+
+ // The mapping of the primary alignment of the
+ // `(readNumber+1)%numberReads` read in the fragment. It replaces
+ // mate position and mate strand in SAM.
+ Position next_mate_position = 16;
+
+ // A map of additional read alignment information. This must be of the form
+ // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 17;
+}
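
Editor's note: the pseudocode in the Read message comment translates directly into Python. A sketch under the assumption that the read is the JSON/dict form of Read with lowerCamelCase field names and a non-null alignment.

    def reference_aligned_sequence(read):
        """Local alignment of the read against the reference, per the pseudocode above."""
        out = []
        offset = 0
        for unit in read["alignment"]["cigar"]:
            op = unit["operation"]
            length = int(unit["operationLength"])
            if op in ("ALIGNMENT_MATCH", "SEQUENCE_MATCH", "SEQUENCE_MISMATCH"):
                out.append(read["alignedSequence"][offset:offset + length])
                offset += length
            elif op in ("CLIP_SOFT", "INSERT"):
                offset += length
            elif op == "PAD":
                out.append("*" * length)
            elif op == "DELETE":
                out.append("-" * length)
            elif op == "SKIP":
                out.append(" " * length)
            # CLIP_HARD consumes neither the read sequence nor the output.
        return "".join(out)
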
diff --git a/third_party/googleapis/google/genomics/v1/readgroup.proto b/third_party/googleapis/google/genomics/v1/readgroup.proto
new file mode 100644
index 0000000000..73e05769ee
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/readgroup.proto
@@ -0,0 +1,106 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/struct.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "ReadGroupProto";
+option java_package = "com.google.genomics.v1";
+
+
+// A read group is all the data that's processed the same way by the sequencer.
+message ReadGroup {
+ message Experiment {
+ // A client-supplied library identifier; a library is a collection of DNA
+ // fragments which have been prepared for sequencing from a sample. This
+ // field is important for quality control as error or bias can be introduced
+ // during sample preparation.
+ string library_id = 1;
+
+ // The platform unit used as part of this experiment, for example
+ // flowcell-barcode.lane for Illumina or slide for SOLiD. Corresponds to the
+ // @RG PU field in the SAM spec.
+ string platform_unit = 2;
+
+ // The sequencing center used as part of this experiment.
+ string sequencing_center = 3;
+
+ // The instrument model used as part of this experiment. This maps to
+ // sequencing technology in the SAM spec.
+ string instrument_model = 4;
+ }
+
+ message Program {
+ // The command line used to run this program.
+ string command_line = 1;
+
+ // The user specified locally unique ID of the program. Used along with
+ // `prevProgramId` to define an ordering between programs.
+ string id = 2;
+
+ // The display name of the program. This is typically the colloquial name of
+ // the tool used, for example 'bwa' or 'picard'.
+ string name = 3;
+
+ // The ID of the program run before this one.
+ string prev_program_id = 4;
+
+ // The version of the program run.
+ string version = 5;
+ }
+
+ // The server-generated read group ID, unique for all read groups.
+ // Note: This is different than the @RG ID field in the SAM spec. For that
+ // value, see [name][google.genomics.v1.ReadGroup.name].
+ string id = 1;
+
+ // The dataset to which this read group belongs.
+ string dataset_id = 2;
+
+ // The read group name. This corresponds to the @RG ID field in the SAM spec.
+ string name = 3;
+
+ // A free-form text description of this read group.
+ string description = 4;
+
+ // A client-supplied sample identifier for the reads in this read group.
+ string sample_id = 5;
+
+ // The experiment used to generate this read group.
+ Experiment experiment = 6;
+
+  // The predicted insert size of this read group. The insert size is the length
+  // of the sequenced DNA fragment from end to end, not including the adapters.
+ int32 predicted_insert_size = 7;
+
+ // The programs used to generate this read group. Programs are always
+ // identical for all read groups within a read group set. For this reason,
+ // only the first read group in a returned set will have this field
+ // populated.
+ repeated Program programs = 10;
+
+ // The reference set the reads in this read group are aligned to.
+ string reference_set_id = 11;
+
+ // A map of additional read group information. This must be of the form
+ // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 12;
+}
diff --git a/third_party/googleapis/google/genomics/v1/readgroupset.proto b/third_party/googleapis/google/genomics/v1/readgroupset.proto
new file mode 100644
index 0000000000..8d5131e1a9
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/readgroupset.proto
@@ -0,0 +1,64 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/genomics/v1/readgroup.proto";
+import "google/protobuf/struct.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "ReadGroupSetProto";
+option java_package = "com.google.genomics.v1";
+
+
+// A read group set is a logical collection of read groups, which are
+// collections of reads produced by a sequencer. A read group set typically
+// models reads corresponding to one sample, sequenced one way, and aligned one
+// way.
+//
+// * A read group set belongs to one dataset.
+// * A read group belongs to one read group set.
+// * A read belongs to one read group.
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+message ReadGroupSet {
+ // The server-generated read group set ID, unique for all read group sets.
+ string id = 1;
+
+ // The dataset to which this read group set belongs.
+ string dataset_id = 2;
+
+ // The reference set to which the reads in this read group set are aligned.
+ string reference_set_id = 3;
+
+ // The read group set name. By default this will be initialized to the sample
+ // name of the sequenced data contained in this set.
+ string name = 4;
+
+ // The filename of the original source file for this read group set, if any.
+ string filename = 5;
+
+ // The read groups in this set. There are typically 1-10 read groups in a read
+ // group set.
+ repeated ReadGroup read_groups = 6;
+
+ // A map of additional read group set information.
+ map<string, google.protobuf.ListValue> info = 7;
+}
diff --git a/third_party/googleapis/google/genomics/v1/reads.proto b/third_party/googleapis/google/genomics/v1/reads.proto
new file mode 100644
index 0000000000..f574707e39
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/reads.proto
@@ -0,0 +1,468 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/genomics/v1/range.proto";
+import "google/genomics/v1/readalignment.proto";
+import "google/genomics/v1/readgroupset.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "ReadsProto";
+option java_package = "com.google.genomics.v1";
+
+
+service StreamingReadService {
+ // Returns a stream of all the reads matching the search request, ordered
+ // by reference name, position, and ID.
+ rpc StreamReads(StreamReadsRequest) returns (stream StreamReadsResponse) {
+ option (google.api.http) = { post: "/v1/reads:stream" body: "*" };
+ }
+}
+
+// The Readstore. A data store for DNA sequencing Reads.
+service ReadServiceV1 {
+ // Creates read group sets by asynchronously importing the provided
+ // information.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // The caller must have WRITE permissions to the dataset.
+ //
+ // ## Notes on [BAM](https://samtools.github.io/hts-specs/SAMv1.pdf) import
+ //
+ // - Tags will be converted to strings - tag types are not preserved
+ // - Comments (`@CO`) in the input file header will not be preserved
+ // - Original header order of references (`@SQ`) will not be preserved
+ // - Any reverse stranded unmapped reads will be reverse complemented, and
+ // their qualities (also the "BQ" and "OQ" tags, if any) will be reversed
+ // - Unmapped reads will be stripped of positional information (reference name
+ // and position)
+ rpc ImportReadGroupSets(ImportReadGroupSetsRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/readgroupsets:import" body: "*" };
+ }
+
+ // Exports a read group set to a BAM file in Google Cloud Storage.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Note that currently there may be some differences between exported BAM
+ // files and the original BAM file at the time of import. See
+ // [ImportReadGroupSets][google.genomics.v1.ReadServiceV1.ImportReadGroupSets]
+ // for caveats.
+ rpc ExportReadGroupSet(ExportReadGroupSetRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/readgroupsets/{read_group_set_id}:export" body: "*" };
+ }
+
+ // Searches for read group sets matching the criteria.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.searchReadGroupSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L135).
+ rpc SearchReadGroupSets(SearchReadGroupSetsRequest) returns (SearchReadGroupSetsResponse) {
+ option (google.api.http) = { post: "/v1/readgroupsets/search" body: "*" };
+ }
+
+ // Updates a read group set.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // This method supports patch semantics.
+ rpc UpdateReadGroupSet(UpdateReadGroupSetRequest) returns (ReadGroupSet) {
+ option (google.api.http) = { patch: "/v1/readgroupsets/{read_group_set_id}" body: "read_group_set" };
+ }
+
+ // Deletes a read group set.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc DeleteReadGroupSet(DeleteReadGroupSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/readgroupsets/{read_group_set_id}" };
+ }
+
+ // Gets a read group set by ID.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc GetReadGroupSet(GetReadGroupSetRequest) returns (ReadGroupSet) {
+ option (google.api.http) = { get: "/v1/readgroupsets/{read_group_set_id}" };
+ }
+
+ // Lists fixed width coverage buckets for a read group set, each of which
+ // corresponds to a range of a reference sequence. Each bucket summarizes
+ // coverage information across its corresponding genomic range.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Coverage is defined as the number of reads which are aligned to a given
+ // base in the reference sequence. Coverage buckets are available at several
+ // precomputed bucket widths, enabling retrieval of various coverage 'zoom
+ // levels'. The caller must have READ permissions for the target read group
+ // set.
+ rpc ListCoverageBuckets(ListCoverageBucketsRequest) returns (ListCoverageBucketsResponse) {
+ option (google.api.http) = { get: "/v1/readgroupsets/{read_group_set_id}/coveragebuckets" };
+ }
+
+ // Gets a list of reads for one or more read group sets.
+ //
+ // For the definitions of read group sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Reads search operates over a genomic coordinate space of reference sequence
+ // & position defined over the reference sequences to which the requested
+ // read group sets are aligned.
+ //
+ // If a target positional range is specified, search returns all reads whose
+ // alignment to the reference genome overlap the range. A query which
+ // specifies only read group set IDs yields all reads in those read group
+ // sets, including unmapped reads.
+ //
+ // All reads returned (including reads on subsequent pages) are ordered by
+ // genomic coordinate (by reference sequence, then position). Reads with
+ // equivalent genomic coordinates are returned in an unspecified order. This
+ // order is consistent, such that two queries for the same content (regardless
+ // of page size) yield reads in the same order across their respective streams
+ // of paginated responses.
+ //
+ // Implements
+ // [GlobalAllianceApi.searchReads](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L85).
+ rpc SearchReads(SearchReadsRequest) returns (SearchReadsResponse) {
+ option (google.api.http) = { post: "/v1/reads/search" body: "*" };
+ }
+}
+
+// The read group set search request.
+message SearchReadGroupSetsRequest {
+ // Restricts this query to read group sets within the given datasets. At least
+ // one ID must be provided.
+ repeated string dataset_ids = 1;
+
+ // Only return read group sets for which a substring of the name matches this
+ // string.
+ string name = 3;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 2;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 256. The maximum value is 1024.
+ int32 page_size = 4;
+}
+
+// The read group set search response.
+message SearchReadGroupSetsResponse {
+ // The list of matching read group sets.
+ repeated ReadGroupSet read_group_sets = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
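+
+// A hypothetical, illustrative follow-up SearchReadGroupSetsRequest in JSON
+// form, showing the paging pattern: the `nextPageToken` returned by the
+// previous response is echoed back as `pageToken`. The dataset ID, name, and
+// token below are placeholders only.
+//
+//   {
+//     "datasetIds": ["example-dataset-id"],
+//     "name": "example-sample",
+//     "pageSize": 256,
+//     "pageToken": "token-from-previous-nextPageToken"
+//   }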
+
+// The read group set import request.
+message ImportReadGroupSetsRequest {
+ enum PartitionStrategy {
+ PARTITION_STRATEGY_UNSPECIFIED = 0;
+
+ // In most cases, this strategy yields one read group set per file. This is
+ // the default behavior.
+ //
+ // Allocate one read group set per file per sample. For BAM files, read
+ // groups are considered to share a sample if they have identical sample
+ // names. Furthermore, all reads for each file which do not belong to a read
+ // group, if any, will be grouped into a single read group set per-file.
+ PER_FILE_PER_SAMPLE = 1;
+
+ // Includes all read groups in all imported files into a single read group
+ // set. Requires that the headers for all imported files are equivalent. All
+ // reads which do not belong to a read group, if any, will be grouped into a
+ // separate read group set.
+ MERGE_ALL = 2;
+ }
+
+ // Required. The ID of the dataset these read group sets will belong to. The
+ // caller must have WRITE permissions to this dataset.
+ string dataset_id = 1;
+
+ // The reference set to which the imported read group sets are aligned, if
+ // any. The reference names of this reference set must be a superset of those
+ // found in the imported file headers. If no reference set id is provided, a
+ // best effort is made to associate with a matching reference set.
+ string reference_set_id = 4;
+
+ // A list of URIs pointing at [BAM
+ // files](https://samtools.github.io/hts-specs/SAMv1.pdf)
+ // in Google Cloud Storage.
+ // Those URIs can include wildcards (*), but do not add or remove
+ // matching files before import has completed.
+ //
+ // Note that Google Cloud Storage object listing is only eventually
+ // consistent: files added may not be immediately visible to
+ // everyone. Thus, if using a wildcard it is preferable not to start
+ // the import immediately after the files are created.
+ repeated string source_uris = 2;
+
+ // The partition strategy describes how read groups are partitioned into read
+ // group sets.
+ PartitionStrategy partition_strategy = 5;
+}
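+
+// A hypothetical, illustrative ImportReadGroupSetsRequest in JSON form. It
+// imports BAM files matched by a wildcard URI and merges all read groups into
+// a single read group set; the dataset ID, reference set ID, and bucket below
+// are placeholders only.
+//
+//   {
+//     "datasetId": "example-dataset-id",
+//     "referenceSetId": "example-reference-set-id",
+//     "sourceUris": ["gs://example-bucket/bams/*.bam"],
+//     "partitionStrategy": "MERGE_ALL"
+//   }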
+
+// The read group set import response.
+message ImportReadGroupSetsResponse {
+ // IDs of the read group sets that were created.
+ repeated string read_group_set_ids = 1;
+}
+
+// The read group set export request.
+message ExportReadGroupSetRequest {
+ // Required. The Google Cloud project ID that owns this
+ // export. The caller must have WRITE access to this project.
+ string project_id = 1;
+
+ // Required. A Google Cloud Storage URI for the exported BAM file.
+ // The currently authenticated user must have write access to the new file.
+ // An error will be returned if the URI already contains data.
+ string export_uri = 2;
+
+ // Required. The ID of the read group set to export. The caller must have
+ // READ access to this read group set.
+ string read_group_set_id = 3;
+
+ // The reference names to export. If this is not specified, all reference
+ // sequences, including unmapped reads, are exported.
+ // Use `*` to export only unmapped reads.
+ repeated string reference_names = 4;
+}
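+
+// A hypothetical, illustrative ExportReadGroupSetRequest in JSON form,
+// exporting only reads aligned to references `20` and `21`. The project ID,
+// destination URI, and read group set ID below are placeholders only.
+//
+//   {
+//     "projectId": "example-project",
+//     "exportUri": "gs://example-bucket/exports/example.bam",
+//     "readGroupSetId": "example-read-group-set-id",
+//     "referenceNames": ["20", "21"]
+//   }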
+
+message UpdateReadGroupSetRequest {
+ // The ID of the read group set to be updated. The caller must have WRITE
+ // permissions to the dataset associated with this read group set.
+ string read_group_set_id = 1;
+
+ // The new read group set data. See `updateMask` for details on mutability of
+ // fields.
+ ReadGroupSet read_group_set = 2;
+
+ // An optional mask specifying which fields to update. Supported fields:
+ //
+ // * [name][google.genomics.v1.ReadGroupSet.name].
+ // * [referenceSetId][google.genomics.v1.ReadGroupSet.reference_set_id].
+ //
+ // Leaving `updateMask` unset is equivalent to specifying all mutable
+ // fields.
+ google.protobuf.FieldMask update_mask = 3;
+}
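+
+// A hypothetical, illustrative UpdateReadGroupSetRequest in JSON form. With
+// patch semantics, only the fields listed in `updateMask` change; here only
+// the name is updated and `referenceSetId` is left untouched. The IDs and
+// name below are placeholders only.
+//
+//   {
+//     "readGroupSetId": "example-read-group-set-id",
+//     "readGroupSet": { "name": "renamed-read-group-set" },
+//     "updateMask": "name"
+//   }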
+
+message DeleteReadGroupSetRequest {
+ // The ID of the read group set to be deleted. The caller must have WRITE
+ // permissions to the dataset associated with this read group set.
+ string read_group_set_id = 1;
+}
+
+message GetReadGroupSetRequest {
+ // The ID of the read group set.
+ string read_group_set_id = 1;
+}
+
+message ListCoverageBucketsRequest {
+ // Required. The ID of the read group set over which coverage is requested.
+ string read_group_set_id = 1;
+
+ // The name of the reference to query, within the reference set associated
+ // with this query. Optional.
+ string reference_name = 3;
+
+ // The start position of the range on the reference, 0-based inclusive. If
+ // specified, `referenceName` must also be specified. Defaults to 0.
+ int64 start = 4;
+
+ // The end position of the range on the reference, 0-based exclusive. If
+ // specified, `referenceName` must also be specified. If unset or 0, defaults
+ // to the length of the reference.
+ int64 end = 5;
+
+ // The desired width of each reported coverage bucket in base pairs. This
+ // will be rounded down to the nearest precomputed bucket width; the value
+ // of which is returned as `bucketWidth` in the response. Defaults
+ // to infinity (each bucket spans an entire reference sequence) or the length
+ // of the target range, if specified. The smallest precomputed
+ // `bucketWidth` is currently 2048 base pairs; this is subject to
+ // change.
+ int64 target_bucket_width = 6;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 7;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 1024. The maximum value is 2048.
+ int32 page_size = 8;
+}
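+
+// A hypothetical, illustrative ListCoverageBucketsRequest in JSON form over a
+// 1 Mbp window; the requested `targetBucketWidth` is rounded down by the
+// server to the nearest precomputed bucket width, as described above. The
+// read group set ID below is a placeholder only.
+//
+//   {
+//     "readGroupSetId": "example-read-group-set-id",
+//     "referenceName": "1",
+//     "start": "1000000",
+//     "end": "2000000",
+//     "targetBucketWidth": "10000"
+//   }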
+
+// A bucket over which read coverage has been precomputed. A bucket corresponds
+// to a specific range of the reference sequence.
+message CoverageBucket {
+ // The genomic coordinate range spanned by this bucket.
+ Range range = 1;
+
+ // The average number of reads which are aligned to each individual
+ // reference base in this bucket.
+ float mean_coverage = 2;
+}
+
+message ListCoverageBucketsResponse {
+ // The length of each coverage bucket in base pairs. Note that buckets at the
+ // end of a reference sequence may be shorter. This value is omitted if the
+ // bucket width is infinity (the default behaviour, with no range or
+ // `targetBucketWidth`).
+ int64 bucket_width = 1;
+
+ // The coverage buckets. The list of buckets is sparse; a bucket with 0
+ // overlapping reads is not returned. A bucket never crosses more than one
+ // reference sequence. Each bucket has width `bucketWidth`, unless
+ // its end is the end of the reference sequence.
+ repeated CoverageBucket coverage_buckets = 2;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 3;
+}
+
+// The read search request.
+message SearchReadsRequest {
+ // The IDs of the read group sets within which to search for reads. All
+ // specified read group sets must be aligned against a common set of reference
+ // sequences; this defines the genomic coordinates for the query. Must specify
+ // one of `readGroupSetIds` or `readGroupIds`.
+ repeated string read_group_set_ids = 1;
+
+ // The IDs of the read groups within which to search for reads. All specified
+ // read groups must belong to the same read group sets. Must specify one of
+ // `readGroupSetIds` or `readGroupIds`.
+ repeated string read_group_ids = 5;
+
+ // The reference sequence name, for example `chr1`, `1`, or `chrX`. If set to
+ // `*`, only unmapped reads are returned. If unspecified, all reads (mapped
+ // and unmapped) are returned.
+ string reference_name = 7;
+
+ // The start position of the range on the reference, 0-based inclusive. If
+ // specified, `referenceName` must also be specified.
+ int64 start = 8;
+
+ // The end position of the range on the reference, 0-based exclusive. If
+ // specified, `referenceName` must also be specified.
+ int64 end = 9;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 3;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 256. The maximum value is 2048.
+ int32 page_size = 4;
+}
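+
+// A hypothetical, illustrative SearchReadsRequest in JSON form, returning
+// reads that overlap a small window of reference `X` for one read group set;
+// setting `referenceName` to `*` instead would return only unmapped reads.
+// The read group set ID below is a placeholder only.
+//
+//   {
+//     "readGroupSetIds": ["example-read-group-set-id"],
+//     "referenceName": "X",
+//     "start": "120000",
+//     "end": "121000",
+//     "pageSize": 256
+//   }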
+
+// The read search response.
+message SearchReadsResponse {
+ // The list of matching alignments sorted by mapped genomic coordinate,
+ // if any, ascending in position within the same reference. Unmapped reads,
+ // which have no position, are returned contiguously and are sorted in
+ // ascending lexicographic order by fragment name.
+ repeated Read alignments = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+// The stream reads request.
+message StreamReadsRequest {
+ // The Google Cloud project ID which will be billed
+ // for this access. The caller must have WRITE access to this project.
+ // Required.
+ string project_id = 1;
+
+ // The ID of the read group set from which to stream reads.
+ string read_group_set_id = 2;
+
+ // The reference sequence name, for example `chr1`,
+ // `1`, or `chrX`. If set to `*`, only unmapped reads are
+ // returned.
+ string reference_name = 3;
+
+ // The start position of the range on the reference, 0-based inclusive. If
+ // specified, `referenceName` must also be specified.
+ int64 start = 4;
+
+ // The end position of the range on the reference, 0-based exclusive. If
+ // specified, `referenceName` must also be specified.
+ int64 end = 5;
+
+ // Restricts results to a shard containing approximately `1/totalShards`
+ // of the normal response payload for this query. Results from a sharded
+ // request are disjoint from those returned by all queries which differ only
+ // in their shard parameter. A shard may yield 0 results; this is especially
+ // likely for large values of `totalShards`.
+ //
+ // Valid values are `[0, totalShards)`.
+ int32 shard = 6;
+
+ // Specifying `totalShards` causes a disjoint subset of the normal response
+ // payload to be returned for each query with a unique `shard` parameter
+ // specified. A best effort is made to yield equally sized shards. Sharding
+ // can be used to distribute processing amongst workers, where each worker is
+ // assigned a unique `shard` number and all workers specify the same
+ // `totalShards` number. The union of reads returned for all sharded queries
+ // `[0, totalShards)` is equal to those returned by a single unsharded query.
+ //
+ // Queries for different values of `totalShards` with common divisors will
+ // share shard boundaries. For example, streaming `shard` 2 of 5
+ // `totalShards` yields the same results as streaming `shard`s 4 and 5 of 10
+ // `totalShards`. This property can be leveraged for adaptive retries.
+ int32 total_shards = 7;
+}
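+
+// A hypothetical, illustrative StreamReadsRequest in JSON form for one shard
+// of a sharded stream: ten workers would each send this request with a unique
+// `shard` value from 0 through 9, and the union of their disjoint result
+// streams equals a single unsharded query, as described above. The project
+// and read group set IDs below are placeholders only.
+//
+//   {
+//     "projectId": "example-project",
+//     "readGroupSetId": "example-read-group-set-id",
+//     "referenceName": "1",
+//     "shard": 2,
+//     "totalShards": 10
+//   }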
+
+message StreamReadsResponse {
+ repeated Read alignments = 1;
+}
diff --git a/third_party/googleapis/google/genomics/v1/references.proto b/third_party/googleapis/google/genomics/v1/references.proto
new file mode 100644
index 0000000000..4ab107375d
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/references.proto
@@ -0,0 +1,282 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "ReferencesProto";
+option java_package = "com.google.genomics.v1";
+
+
+service ReferenceServiceV1 {
+ // Searches for reference sets which match the given criteria.
+ //
+ // For the definitions of references and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.searchReferenceSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L71)
+ rpc SearchReferenceSets(SearchReferenceSetsRequest) returns (SearchReferenceSetsResponse) {
+ option (google.api.http) = { post: "/v1/referencesets/search" body: "*" };
+ }
+
+ // Gets a reference set.
+ //
+ // For the definitions of references and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.getReferenceSet](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L83).
+ rpc GetReferenceSet(GetReferenceSetRequest) returns (ReferenceSet) {
+ option (google.api.http) = { get: "/v1/referencesets/{reference_set_id}" };
+ }
+
+ // Searches for references which match the given criteria.
+ //
+ // For the definitions of references and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.searchReferences](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L146).
+ rpc SearchReferences(SearchReferencesRequest) returns (SearchReferencesResponse) {
+ option (google.api.http) = { post: "/v1/references/search" body: "*" };
+ }
+
+ // Gets a reference.
+ //
+ // For the definitions of references and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.getReference](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L158).
+ rpc GetReference(GetReferenceRequest) returns (Reference) {
+ option (google.api.http) = { get: "/v1/references/{reference_id}" };
+ }
+
+ // Lists the bases in a reference, optionally restricted to a range.
+ //
+ // For the definitions of references and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.getReferenceBases](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L221).
+ rpc ListBases(ListBasesRequest) returns (ListBasesResponse) {
+ option (google.api.http) = { get: "/v1/references/{reference_id}/bases" };
+ }
+}
+
+// A reference is a canonical assembled DNA sequence, intended to act as a
+// reference coordinate space for other genomic annotations. A single reference
+// might represent the human chromosome 1 or mitochondrial DNA, for instance. A
+// reference belongs to one or more reference sets.
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+message Reference {
+ // The server-generated reference ID, unique across all references.
+ string id = 1;
+
+ // The length of this reference's sequence.
+ int64 length = 2;
+
+ // MD5 of the upper-case sequence excluding all whitespace characters (this
+ // is equivalent to SQ:M5 in SAM). This value is represented in lower case
+ // hexadecimal format.
+ string md5checksum = 3;
+
+ // The name of this reference, for example `22`.
+ string name = 4;
+
+ // The URI from which the sequence was obtained. Typically specifies a FASTA
+ // format file.
+ string source_uri = 5;
+
+ // All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
+ // with a version number, for example `GCF_000001405.26`.
+ repeated string source_accessions = 6;
+
+ // ID from http://www.ncbi.nlm.nih.gov/taxonomy. For example, 9606 for human.
+ int32 ncbi_taxon_id = 7;
+}
+
+// A reference set is a set of references which typically comprise a reference
+// assembly for a species, such as `GRCh38` which is representative
+// of the human genome. A reference set defines a common coordinate space for
+// comparing reference-aligned experimental data. A reference set contains 1 or
+// more references.
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+message ReferenceSet {
+ // The server-generated reference set ID, unique across all reference sets.
+ string id = 1;
+
+ // The IDs of the reference objects that are part of this set.
+ // `Reference.md5checksum` must be unique within this set.
+ repeated string reference_ids = 2;
+
+ // Order-independent MD5 checksum which identifies this reference set. The
+ // checksum is computed by sorting all lower case hexadecimal strings
+ // `reference.md5checksum` (for all references in this set) in
+ // ascending lexicographic order, concatenating, and taking the MD5 of that
+ // value. The resulting value is represented in lower case hexadecimal format.
+ string md5checksum = 3;
+
+ // ID from http://www.ncbi.nlm.nih.gov/taxonomy (for example, 9606 for human)
+ // indicating the species which this reference set is intended to model. Note
+ // that contained references may specify a different `ncbiTaxonId`, as
+ // assemblies may contain reference sequences which do not belong to the
+ // modeled species, for example EBV in a human reference genome.
+ int32 ncbi_taxon_id = 4;
+
+ // Free text description of this reference set.
+ string description = 5;
+
+ // Public id of this reference set, such as `GRCh37`.
+ string assembly_id = 6;
+
+ // The URI from which the references were obtained.
+ string source_uri = 7;
+
+ // All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
+ // with a version number, for example `NC_000001.11`.
+ repeated string source_accessions = 8;
+}
+
+message SearchReferenceSetsRequest {
+ // If present, return reference sets for which the
+ // [md5checksum][google.genomics.v1.ReferenceSet.md5checksum] matches exactly.
+ repeated string md5checksums = 1;
+
+ // If present, return reference sets for which a prefix of any of
+ // [sourceAccessions][google.genomics.v1.ReferenceSet.source_accessions]
+ // matches any of these strings. Accession numbers typically have a main number
+ // and a version, for example `NC_000001.11`.
+ repeated string accessions = 2;
+
+ // If present, return reference sets for which a substring of their
+ // `assemblyId` matches this string (case insensitive).
+ string assembly_id = 3;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 4;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 1024. The maximum value is 4096.
+ int32 page_size = 5;
+}
+
+message SearchReferenceSetsResponse {
+ // The matching references sets.
+ repeated ReferenceSet reference_sets = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+message GetReferenceSetRequest {
+ // The ID of the reference set.
+ string reference_set_id = 1;
+}
+
+message SearchReferencesRequest {
+ // If present, return references for which the
+ // [md5checksum][google.genomics.v1.Reference.md5checksum] matches exactly.
+ repeated string md5checksums = 1;
+
+ // If present, return references for which a prefix of any of
+ // [sourceAccessions][google.genomics.v1.Reference.source_accessions] matches
+ // any of these strings. Accession numbers typically have a main number and a
+ // version, for example `GCF_000001405.26`.
+ repeated string accessions = 2;
+
+ // If present, return only references which belong to this reference set.
+ string reference_set_id = 3;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 4;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 1024. The maximum value is 4096.
+ int32 page_size = 5;
+}
+
+message SearchReferencesResponse {
+ // The matching references.
+ repeated Reference references = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+message GetReferenceRequest {
+ // The ID of the reference.
+ string reference_id = 1;
+}
+
+message ListBasesRequest {
+ // The ID of the reference.
+ string reference_id = 1;
+
+ // The start position (0-based) of this query. Defaults to 0.
+ int64 start = 2;
+
+ // The end position (0-based, exclusive) of this query. Defaults to the length
+ // of this reference.
+ int64 end = 3;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 4;
+
+ // The maximum number of bases to return in a single page. If unspecified,
+ // defaults to 200Kbp (kilo base pairs). The maximum value is 10Mbp (mega base
+ // pairs).
+ int32 page_size = 5;
+}
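+
+// A hypothetical, illustrative ListBasesRequest in JSON form fetching a 1 kb
+// slice of a reference; longer ranges are paged via `nextPageToken`, and the
+// response `offset` reports where each returned `sequence` fragment starts.
+// The reference ID below is a placeholder only.
+//
+//   {
+//     "referenceId": "example-reference-id",
+//     "start": "1000000",
+//     "end": "1001000"
+//   }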
+
+message ListBasesResponse {
+ // The offset position (0-based) of the given `sequence` from the
+ // start of this `Reference`. This value will differ for each page
+ // in a paginated request.
+ int64 offset = 1;
+
+ // A substring of the bases that make up this reference.
+ string sequence = 2;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 3;
+}
diff --git a/third_party/googleapis/google/genomics/v1/variants.proto b/third_party/googleapis/google/genomics/v1/variants.proto
new file mode 100644
index 0000000000..bdd80f9860
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1/variants.proto
@@ -0,0 +1,903 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/struct.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "VariantsProto";
+option java_package = "com.google.genomics.v1";
+
+
+service StreamingVariantService {
+ // Returns a stream of all the variants matching the search request, ordered
+ // by reference name, position, and ID.
+ rpc StreamVariants(StreamVariantsRequest) returns (stream StreamVariantsResponse) {
+ option (google.api.http) = { post: "/v1/variants:stream" body: "*" };
+ }
+}
+
+service VariantServiceV1 {
+ // Creates variant data by asynchronously importing the provided information.
+ //
+ // For the definitions of variant sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Each variant for import will be merged with any existing variant that
+ // matches its reference sequence, start, end, reference bases, and
+ // alternative bases. If no such variant exists, a new one will be created.
+ //
+ // When variants are merged, the call information from the new variant
+ // is added to the existing variant, and Variant info fields are merged
+ // as specified in
+ // [infoMergeConfig][google.genomics.v1.ImportVariantsRequest.info_merge_config].
+ // As a special case, for single-sample VCF files, QUAL and FILTER fields will
+ // be moved to the call level; these are sometimes interpreted in a
+ // call-specific context.
+ // Imported VCF headers are appended to the metadata already in a variant set.
+ rpc ImportVariants(ImportVariantsRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/variants:import" body: "*" };
+ }
+
+ // Creates a new variant set.
+ //
+ // For the definitions of variant sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // The provided variant set must have a valid `datasetId` set - all other
+ // fields are optional. Note that the `id` field will be ignored, as this is
+ // assigned by the server.
+ rpc CreateVariantSet(CreateVariantSetRequest) returns (VariantSet) {
+ option (google.api.http) = { post: "/v1/variantsets" body: "variant_set" };
+ }
+
+ // Exports variant set data to an external destination.
+ //
+ // For the definitions of variant sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc ExportVariantSet(ExportVariantSetRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/variantsets/{variant_set_id}:export" body: "*" };
+ }
+
+ // Gets a variant set by ID.
+ //
+ // For the definitions of variant sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc GetVariantSet(GetVariantSetRequest) returns (VariantSet) {
+ option (google.api.http) = { get: "/v1/variantsets/{variant_set_id}" };
+ }
+
+ // Returns a list of all variant sets matching search criteria.
+ //
+ // For the definitions of variant sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.searchVariantSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L49).
+ rpc SearchVariantSets(SearchVariantSetsRequest) returns (SearchVariantSetsResponse) {
+ option (google.api.http) = { post: "/v1/variantsets/search" body: "*" };
+ }
+
+ // Deletes a variant set including all variants, call sets, and calls within.
+ // This is not reversible.
+ //
+ // For the definitions of variant sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc DeleteVariantSet(DeleteVariantSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/variantsets/{variant_set_id}" };
+ }
+
+ // Updates a variant set using patch semantics.
+ //
+ // For the definitions of variant sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc UpdateVariantSet(UpdateVariantSetRequest) returns (VariantSet) {
+ option (google.api.http) = { patch: "/v1/variantsets/{variant_set_id}" body: "variant_set" };
+ }
+
+ // Gets a list of variants matching the criteria.
+ //
+ // For the definitions of variants and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.searchVariants](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L126).
+ rpc SearchVariants(SearchVariantsRequest) returns (SearchVariantsResponse) {
+ option (google.api.http) = { post: "/v1/variants/search" body: "*" };
+ }
+
+ // Creates a new variant.
+ //
+ // For the definitions of variants and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc CreateVariant(CreateVariantRequest) returns (Variant) {
+ option (google.api.http) = { post: "/v1/variants" body: "variant" };
+ }
+
+ // Updates a variant.
+ //
+ // For the definitions of variants and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // This method supports patch semantics. Returns the modified variant without
+ // its calls.
+ rpc UpdateVariant(UpdateVariantRequest) returns (Variant) {
+ option (google.api.http) = { patch: "/v1/variants/{variant_id}" body: "variant" };
+ }
+
+ // Deletes a variant.
+ //
+ // For the definitions of variants and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc DeleteVariant(DeleteVariantRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/variants/{variant_id}" };
+ }
+
+ // Gets a variant by ID.
+ //
+ // For the definitions of variants and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc GetVariant(GetVariantRequest) returns (Variant) {
+ option (google.api.http) = { get: "/v1/variants/{variant_id}" };
+ }
+
+ // Merges the given variants with existing variants.
+ //
+ // For the definitions of variants and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Each variant will be
+ // merged with an existing variant that matches its reference sequence,
+ // start, end, reference bases, and alternative bases. If no such variant
+ // exists, a new one will be created.
+ //
+ // When variants are merged, the call information from the new variant
+ // is added to the existing variant. Variant info fields are merged as
+ // specified in the
+ // [infoMergeConfig][google.genomics.v1.MergeVariantsRequest.info_merge_config]
+ // field of the MergeVariantsRequest.
+ //
+ // Please exercise caution when using this method! It is easy to introduce
+ // mistakes in existing variants and difficult to back out of them. For
+ // example, suppose you were trying to merge a new variant with an existing
+ // one, and both variants contain calls that belong to callsets with the same
+ // callset ID.
+ //
+ // // Existing variant - irrelevant fields trimmed for clarity
+ // {
+ // "variantSetId": "10473108253681171589",
+ // "referenceName": "1",
+ // "start": "10582",
+ // "referenceBases": "G",
+ // "alternateBases": [
+ // "A"
+ // ],
+ // "calls": [
+ // {
+ // "callSetId": "10473108253681171589-0",
+ // "callSetName": "CALLSET0",
+ // "genotype": [
+ // 0,
+ // 1
+ // ],
+ // }
+ // ]
+ // }
+ //
+ // // New variant with conflicting call information
+ // {
+ // "variantSetId": "10473108253681171589",
+ // "referenceName": "1",
+ // "start": "10582",
+ // "referenceBases": "G",
+ // "alternateBases": [
+ // "A"
+ // ],
+ // "calls": [
+ // {
+ // "callSetId": "10473108253681171589-0",
+ // "callSetName": "CALLSET0",
+ // "genotype": [
+ // 1,
+ // 1
+ // ],
+ // }
+ // ]
+ // }
+ //
+ // The resulting merged variant would overwrite the existing calls with those
+ // from the new variant:
+ //
+ // {
+ // "variantSetId": "10473108253681171589",
+ // "referenceName": "1",
+ // "start": "10582",
+ // "referenceBases": "G",
+ // "alternateBases": [
+ // "A"
+ // ],
+ // "calls": [
+ // {
+ // "callSetId": "10473108253681171589-0",
+ // "callSetName": "CALLSET0",
+ // "genotype": [
+ // 1,
+ // 1
+ // ],
+ // }
+ // ]
+ // }
+ //
+ // This may be the desired outcome, but it is up to the user to determine if
+ // that is indeed the case.
+ rpc MergeVariants(MergeVariantsRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/variants:merge" body: "*" };
+ }
+
+ // Gets a list of call sets matching the criteria.
+ //
+ // For the definitions of call sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // Implements
+ // [GlobalAllianceApi.searchCallSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L178).
+ rpc SearchCallSets(SearchCallSetsRequest) returns (SearchCallSetsResponse) {
+ option (google.api.http) = { post: "/v1/callsets/search" body: "*" };
+ }
+
+ // Creates a new call set.
+ //
+ // For the definitions of call sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc CreateCallSet(CreateCallSetRequest) returns (CallSet) {
+ option (google.api.http) = { post: "/v1/callsets" body: "call_set" };
+ }
+
+ // Updates a call set.
+ //
+ // For the definitions of call sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ //
+ // This method supports patch semantics.
+ rpc UpdateCallSet(UpdateCallSetRequest) returns (CallSet) {
+ option (google.api.http) = { patch: "/v1/callsets/{call_set_id}" body: "call_set" };
+ }
+
+ // Deletes a call set.
+ //
+ // For the definitions of call sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc DeleteCallSet(DeleteCallSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/callsets/{call_set_id}" };
+ }
+
+ // Gets a call set by ID.
+ //
+ // For the definitions of call sets and other genomics resources, see
+ // [Fundamentals of Google
+ // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+ rpc GetCallSet(GetCallSetRequest) returns (CallSet) {
+ option (google.api.http) = { get: "/v1/callsets/{call_set_id}" };
+ }
+}
+
+// Metadata describes a single piece of variant call metadata.
+// These data include a top-level key and either a single value string (value)
+// or a list of key-value pairs (info).
+// Value and info are mutually exclusive.
+message VariantSetMetadata {
+ enum Type {
+ TYPE_UNSPECIFIED = 0;
+
+ INTEGER = 1;
+
+ FLOAT = 2;
+
+ FLAG = 3;
+
+ CHARACTER = 4;
+
+ STRING = 5;
+ }
+
+ // The top-level key.
+ string key = 1;
+
+ // The value field for simple metadata
+ string value = 2;
+
+ // User-provided ID field, not enforced by this API.
+ // Two or more pieces of structured metadata with identical
+ // id and key fields are considered equivalent.
+ string id = 4;
+
+ // The type of data. Possible types include: Integer, Float,
+ // Flag, Character, and String.
+ Type type = 5;
+
+ // The number of values that can be included in a field described by this
+ // metadata.
+ string number = 8;
+
+ // A textual description of this metadata.
+ string description = 7;
+
+ // Remaining structured metadata key-value pairs. This must be of the form
+ // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 3;
+}
+
+// A variant set is a collection of call sets and variants. It contains summary
+// statistics of those contents. A variant set belongs to a dataset.
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+message VariantSet {
+ // The dataset to which this variant set belongs.
+ string dataset_id = 1;
+
+ // The server-generated variant set ID, unique across all variant sets.
+ string id = 2;
+
+ // The reference set to which the variant set is mapped. The reference set
+ // describes the alignment provenance of the variant set, while the
+ // `referenceBounds` describe the shape of the actual variant data. The
+ // reference set's reference names are a superset of those found in the
+ // `referenceBounds`.
+ //
+ // For example, given a variant set that is mapped to the GRCh38 reference set
+ // and contains a single variant on reference 'X', `referenceBounds` would
+ // contain only an entry for 'X', while the associated reference set
+ // enumerates all possible references: '1', '2', 'X', 'Y', 'MT', etc.
+ string reference_set_id = 6;
+
+ // A list of all references used by the variants in a variant set
+ // with associated coordinate upper bounds for each one.
+ repeated ReferenceBound reference_bounds = 5;
+
+ // The metadata associated with this variant set.
+ repeated VariantSetMetadata metadata = 4;
+
+ // User-specified, mutable name.
+ string name = 7;
+
+ // A textual description of this variant set.
+ string description = 8;
+}
+
+// A variant represents a change in DNA sequence relative to a reference
+// sequence. For example, a variant could represent a SNP or an insertion.
+// Variants belong to a variant set.
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+//
+// Each of the calls on a variant represent a determination of genotype with
+// respect to that variant. For example, a call might assign probability of 0.32
+// to the occurrence of a SNP named rs1234 in a sample named NA12345. A call
+// belongs to a call set, which contains related calls typically from one
+// sample.
+message Variant {
+ // The ID of the variant set this variant belongs to.
+ string variant_set_id = 15;
+
+ // The server-generated variant ID, unique across all variants.
+ string id = 2;
+
+ // Names for the variant, for example a RefSNP ID.
+ repeated string names = 3;
+
+ // The date this variant was created, in milliseconds from the epoch.
+ int64 created = 12;
+
+ // The reference on which this variant occurs.
+ // (such as `chr20` or `X`)
+ string reference_name = 14;
+
+ // The position at which this variant occurs (0-based).
+ // This corresponds to the first base of the string of reference bases.
+ int64 start = 16;
+
+ // The end position (0-based) of this variant. This corresponds to the first
+ // base after the last base in the reference allele. So, the length of
+ // the reference allele is (end - start). This is useful for variants
+ // that don't explicitly give alternate bases, for example large deletions.
+ int64 end = 13;
+
+ // The reference bases for this variant. They start at the given
+ // position.
+ string reference_bases = 6;
+
+ // The bases that appear instead of the reference bases.
+ repeated string alternate_bases = 7;
+
+ // A measure of how likely this variant is to be real.
+ // A higher value is better.
+ double quality = 8;
+
+ // A list of filters (normally quality filters) this variant has failed.
+ // `PASS` indicates this variant has passed all filters.
+ repeated string filter = 9;
+
+ // A map of additional variant information. This must be of the form
+ // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 10;
+
+ // The variant calls for this particular variant. Each one represents the
+ // determination of genotype with respect to this variant.
+ repeated VariantCall calls = 11;
+}
+
+// A call represents the determination of genotype with respect to a particular
+// variant. It may include associated information such as quality and phasing.
+// For example, a call might assign a probability of 0.32 to the occurrence of
+// a SNP named rs1234 in a call set with the name NA12345.
+message VariantCall {
+ // The ID of the call set this variant call belongs to.
+ string call_set_id = 8;
+
+ // The name of the call set this variant call belongs to.
+ string call_set_name = 9;
+
+ // The genotype of this variant call. Each value represents either the value
+ // of the `referenceBases` field or a 1-based index into
+ // `alternateBases`. If a variant had a `referenceBases`
+ // value of `T` and an `alternateBases`
+ // value of `["A", "C"]`, and the `genotype` was
+ // `[2, 1]`, that would mean the call
+ // represented the heterozygous value `CA` for this variant.
+ // If the `genotype` was instead `[0, 1]`, the
+ // represented value would be `TA`. Ordering of the
+ // genotype values is important if the `phaseset` is present.
+ // If a genotype is not called (that is, a `.` is present in the
+ // GT string) -1 is returned.
+ repeated int32 genotype = 7;
+
+ // If this field is present, this variant call's genotype ordering implies
+ // the phase of the bases and is consistent with any other variant calls in
+ // the same reference sequence which have the same phaseset value.
+ // When importing data from VCF, if the genotype data was phased but no
+ // phase set was specified this field will be set to `*`.
+ string phaseset = 5;
+
+ // The genotype likelihoods for this variant call. Each array entry
+ // represents how likely a specific genotype is for this call. The value
+ // ordering is defined by the GL tag in the VCF spec.
+ // If Phred-scaled genotype likelihood scores (PL) are available and
+ // log10(P) genotype likelihood scores (GL) are not, PL scores are converted
+ // to GL scores. If both are available, PL scores are stored in `info`.
+ repeated double genotype_likelihood = 6;
+
+ // A map of additional variant call information. This must be of the form
+ // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 2;
+}
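+
+// A hypothetical, illustrative VariantCall in JSON form for the genotype
+// example given above: with `referenceBases` `T` and `alternateBases`
+// `["A", "C"]`, the genotype `[0, 1]` represents the heterozygous value `TA`.
+// The call set ID and name below are placeholders only.
+//
+//   {
+//     "callSetId": "example-call-set-id",
+//     "callSetName": "EXAMPLE_SAMPLE",
+//     "genotype": [0, 1]
+//   }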
+
+// A call set is a collection of variant calls, typically for one sample. It
+// belongs to a variant set.
+//
+// For more genomics resource definitions, see [Fundamentals of Google
+// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)
+message CallSet {
+ // The server-generated call set ID, unique across all call sets.
+ string id = 1;
+
+ // The call set name.
+ string name = 2;
+
+ // The sample ID this call set corresponds to.
+ string sample_id = 7;
+
+ // The IDs of the variant sets this call set belongs to. This field must
+ // have exactly length one, as a call set belongs to a single variant set.
+ // This field is repeated for compatibility with the
+ // [GA4GH 0.5.1
+ // API](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variants.avdl#L76).
+ repeated string variant_set_ids = 6;
+
+ // The date this call set was created in milliseconds from the epoch.
+ int64 created = 5;
+
+ // A map of additional call set information. This must be of the form
+ // map<string, string[]> (string key mapping to a list of string values).
+ map<string, google.protobuf.ListValue> info = 4;
+}
+
+// ReferenceBound records an upper bound for the starting coordinate of
+// variants in a particular reference.
+message ReferenceBound {
+ // The name of the reference associated with this reference bound.
+ string reference_name = 1;
+
+ // An upper bound (inclusive) on the starting coordinate of any
+ // variant in the reference sequence.
+ int64 upper_bound = 2;
+}
+
+// The variant data import request.
+message ImportVariantsRequest {
+ enum Format {
+ FORMAT_UNSPECIFIED = 0;
+
+ // VCF (Variant Call Format). The VCF files may be gzip compressed. gVCF is
+ // also supported.
+ FORMAT_VCF = 1;
+
+ // Complete Genomics masterVarBeta format. The masterVarBeta files may
+ // be bzip2 compressed.
+ FORMAT_COMPLETE_GENOMICS = 2;
+ }
+
+ // Required. The variant set to which variant data should be imported.
+ string variant_set_id = 1;
+
+ // A list of URIs referencing variant files in Google Cloud Storage. URIs can
+ // include wildcards [as described
+ // here](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).
+ // Note that recursive wildcards ('**') are not supported.
+ repeated string source_uris = 2;
+
+ // The format of the variant data being imported. If unspecified, defaults
+ // to `VCF`.
+ Format format = 3;
+
+ // Convert reference names to the canonical representation.
+ // hg19 haplotypes (those reference names containing "_hap") are not modified
+ // in any way.
+ // All other reference names are modified according to the following rules:
+ //
+ // - The reference name is capitalized.
+ // - The "chr" prefix is dropped for all autosomes and sex chromosomes.
+ //   For example "chr17" becomes "17" and "chrX" becomes "X".
+ // - All mitochondrial chromosomes ("chrM", "chrMT", etc) become "MT".
+ bool normalize_reference_names = 5;
+
+ // A mapping between info field keys and the InfoMergeOperations to
+ // be performed on them. This is plumbed down to the MergeVariantRequests
+ // generated by the resulting import job.
+ map<string, InfoMergeOperation> info_merge_config = 6;
+}
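+
+// A hypothetical, illustrative ImportVariantsRequest in JSON form importing
+// gzip-compressed VCFs matched by a wildcard URI, normalizing reference
+// names, and moving the "DP" info field down to the calls during merging.
+// The variant set ID, bucket, and info key below are placeholders only.
+//
+//   {
+//     "variantSetId": "example-variant-set-id",
+//     "sourceUris": ["gs://example-bucket/vcfs/*.vcf.gz"],
+//     "format": "FORMAT_VCF",
+//     "normalizeReferenceNames": true,
+//     "infoMergeConfig": { "DP": "MOVE_TO_CALLS" }
+//   }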
+
+// The variant data import response.
+message ImportVariantsResponse {
+ // IDs of the call sets created during the import.
+ repeated string call_set_ids = 1;
+}
+
+// The CreateVariantSet request
+message CreateVariantSetRequest {
+ // Required. The variant set to be created. Must have a valid `datasetId`.
+ VariantSet variant_set = 1;
+}
+
+// The variant data export request.
+message ExportVariantSetRequest {
+ enum Format {
+ FORMAT_UNSPECIFIED = 0;
+
+ // Export the data to Google BigQuery.
+ FORMAT_BIGQUERY = 1;
+ }
+
+ // Required. The ID of the variant set that contains variant data which
+ // should be exported. The caller must have READ access to this variant set.
+ string variant_set_id = 1;
+
+ // If provided, only variant call information from the specified call sets
+ // will be exported. By default all variant calls are exported.
+ repeated string call_set_ids = 2;
+
+ // Required. The Google Cloud project ID that owns the destination
+ // BigQuery dataset. The caller must have WRITE access to this project. This
+ // project will also own the resulting export job.
+ string project_id = 3;
+
+ // The format for the exported data.
+ Format format = 4;
+
+ // Required. The BigQuery dataset to export data to. This dataset must already
+ // exist. Note that this is distinct from the Genomics concept of "dataset".
+ string bigquery_dataset = 5;
+
+ // Required. The BigQuery table to export data to.
+ // If the table doesn't exist, it will be created. If it already exists, it
+ // will be overwritten.
+ string bigquery_table = 6;
+}
+
+// The variant set request.
+message GetVariantSetRequest {
+ // Required. The ID of the variant set.
+ string variant_set_id = 1;
+}
+
+// The search variant sets request.
+message SearchVariantSetsRequest {
+ // Exactly one dataset ID must be provided here. Only variant sets which
+ // belong to this dataset will be returned.
+ repeated string dataset_ids = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 2;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 1024.
+ int32 page_size = 3;
+}
+
+// The search variant sets response.
+message SearchVariantSetsResponse {
+ // The variant sets belonging to the requested dataset.
+ repeated VariantSet variant_sets = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+// The delete variant set request.
+message DeleteVariantSetRequest {
+ // The ID of the variant set to be deleted.
+ string variant_set_id = 1;
+}
+
+message UpdateVariantSetRequest {
+ // The ID of the variant set to be updated (must already exist).
+ string variant_set_id = 1;
+
+ // The new variant set data. Only the variant_set.metadata will be considered
+ // for update.
+ VariantSet variant_set = 2;
+
+ // An optional mask specifying which fields to update. Supported fields:
+ //
+ // * [metadata][google.genomics.v1.VariantSet.metadata].
+ // * [name][google.genomics.v1.VariantSet.name].
+ // * [description][google.genomics.v1.VariantSet.description].
+ //
+ // Leaving `updateMask` unset is equivalent to specifying all mutable
+ // fields.
+ google.protobuf.FieldMask update_mask = 5;
+}
+
+// The variant search request.
+message SearchVariantsRequest {
+ // At most one variant set ID must be provided. Only variants from this
+ // variant set will be returned. If omitted, a call set id must be included in
+ // the request.
+ repeated string variant_set_ids = 1;
+
+ // Only return variants which have exactly this name.
+ string variant_name = 2;
+
+ // Only return variant calls which belong to call sets with these ids.
+ // Leaving this blank returns all variant calls. If a variant has no
+ // calls belonging to any of these call sets, it won't be returned at all.
+ repeated string call_set_ids = 3;
+
+ // Required. Only return variants in this reference sequence.
+ string reference_name = 4;
+
+ // The beginning of the window (0-based, inclusive) for which
+ // overlapping variants should be returned. If unspecified, defaults to 0.
+ int64 start = 5;
+
+ // The end of the window, 0-based exclusive. If unspecified or 0, defaults to
+ // the length of the reference.
+ int64 end = 6;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 7;
+
+ // The maximum number of variants to return in a single page. If unspecified,
+ // defaults to 5000. The maximum value is 10000.
+ int32 page_size = 8;
+
+ // The maximum number of calls to return in a single page. Note that this
+ // limit may be exceeded in the event that a matching variant contains more
+ // calls than the requested maximum. If unspecified, defaults to 5000. The
+ // maximum value is 10000.
+ int32 max_calls = 9;
+}
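+
+// A hypothetical, illustrative SearchVariantsRequest in JSON form returning
+// variants that overlap a window of reference `17`, restricted to a single
+// call set. The variant set and call set IDs below are placeholders only.
+//
+//   {
+//     "variantSetIds": ["example-variant-set-id"],
+//     "callSetIds": ["example-call-set-id"],
+//     "referenceName": "17",
+//     "start": "1000000",
+//     "end": "1005000",
+//     "pageSize": 5000
+//   }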
+
+// The variant search response.
+message SearchVariantsResponse {
+ // The list of matching Variants.
+ repeated Variant variants = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+message CreateVariantRequest {
+ // The variant to be created.
+ Variant variant = 1;
+}
+
+message UpdateVariantRequest {
+ // The ID of the variant to be updated.
+ string variant_id = 1;
+
+ // The new variant data.
+ Variant variant = 2;
+
+ // An optional mask specifying which fields to update. At this time, mutable
+ // fields are [names][google.genomics.v1.Variant.names] and
+ // [info][google.genomics.v1.Variant.info]. Acceptable values are "names" and
+ // "info". If unspecified, all mutable fields will be updated.
+ google.protobuf.FieldMask update_mask = 3;
+}
+
+message DeleteVariantRequest {
+ // The ID of the variant to be deleted.
+ string variant_id = 1;
+}
+
+message GetVariantRequest {
+ // The ID of the variant.
+ string variant_id = 1;
+}
+
+message MergeVariantsRequest {
+ // The destination variant set.
+ string variant_set_id = 1;
+
+ // The variants to be merged with existing variants.
+ repeated Variant variants = 2;
+
+ // A mapping between info field keys and the InfoMergeOperations to
+ // be performed on them.
+ map<string, InfoMergeOperation> info_merge_config = 3;
+}
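+
+// As an illustrative sketch, a `MergeVariantsRequest` in protobuf text format
+// that moves the hypothetical `DP` info key onto the calls of each merged
+// variant (the variant set ID is also a placeholder; the fields of the nested
+// Variant are elided):
+//
+// ```
+// variant_set_id: "my-variant-set"
+// variants {
+//   # fields of the Variant to merge go here
+// }
+// info_merge_config { key: "DP" value: MOVE_TO_CALLS }
+// ```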
+
+// The call set search request.
+message SearchCallSetsRequest {
+ // Restrict the query to call sets within the given variant sets. At least one
+ // ID must be provided.
+ repeated string variant_set_ids = 1;
+
+ // Only return call sets for which a substring of the name matches this
+ // string.
+ string name = 2;
+
+ // The continuation token, which is used to page through large result sets.
+ // To get the next page of results, set this parameter to the value of
+ // `nextPageToken` from the previous response.
+ string page_token = 3;
+
+ // The maximum number of results to return in a single page. If unspecified,
+ // defaults to 1024.
+ int32 page_size = 4;
+}
+
+// The call set search response.
+message SearchCallSetsResponse {
+ // The list of matching call sets.
+ repeated CallSet call_sets = 1;
+
+ // The continuation token, which is used to page through large result sets.
+ // Provide this value in a subsequent request to return the next page of
+ // results. This field will be empty if there aren't any additional results.
+ string next_page_token = 2;
+}
+
+message CreateCallSetRequest {
+ // The call set to be created.
+ CallSet call_set = 1;
+}
+
+message UpdateCallSetRequest {
+ // The ID of the call set to be updated.
+ string call_set_id = 1;
+
+ // The new call set data.
+ CallSet call_set = 2;
+
+ // An optional mask specifying which fields to update. At this time, the only
+ // mutable field is [name][google.genomics.v1.CallSet.name]. The only
+ // acceptable value is "name". If unspecified, all mutable fields will be
+ // updated.
+ google.protobuf.FieldMask update_mask = 3;
+}
+
+message DeleteCallSetRequest {
+ // The ID of the call set to be deleted.
+ string call_set_id = 1;
+}
+
+message GetCallSetRequest {
+ // The ID of the call set.
+ string call_set_id = 1;
+}
+
+// The stream variants request.
+message StreamVariantsRequest {
+ // The Google Cloud project ID which will be billed
+ // for this access. The caller must have WRITE access to this project.
+ // Required.
+ string project_id = 1;
+
+ // The variant set ID from which to stream variants.
+ string variant_set_id = 2;
+
+ // Only return variant calls which belong to call sets with these IDs.
+ // Leaving this blank returns all variant calls.
+ repeated string call_set_ids = 3;
+
+ // Required. Only return variants in this reference sequence.
+ string reference_name = 4;
+
+ // The beginning of the window (0-based, inclusive) for which
+ // overlapping variants should be returned.
+ int64 start = 5;
+
+ // The end of the window (0-based, exclusive) for which overlapping
+ // variants should be returned.
+ int64 end = 6;
+}
+
+message StreamVariantsResponse {
+ repeated Variant variants = 1;
+}
+
+// Operations to be performed during import on Variant info fields.
+// These operations are set for each info field in the info_merge_config
+// map of ImportVariantsRequest, which is plumbed down to the
+// MergeVariantRequests generated by the import job.
+enum InfoMergeOperation {
+ INFO_MERGE_OPERATION_UNSPECIFIED = 0;
+
+ // By default, Variant info fields are persisted if the Variant doesn't
+ // already exist in the variantset. If the Variant is equivalent to a
+ // Variant already in the variantset, the incoming Variant's info field
+ // is ignored in favor of that of the already persisted Variant.
+ IGNORE_NEW = 1;
+
+ // This operation removes an info field from the incoming Variant
+ // and persists this info field in each of the incoming Variant's Calls.
+ MOVE_TO_CALLS = 2;
+}
diff --git a/third_party/googleapis/google/genomics/v1alpha2/pipelines.proto b/third_party/googleapis/google/genomics/v1alpha2/pipelines.proto
new file mode 100644
index 0000000000..c0f5102234
--- /dev/null
+++ b/third_party/googleapis/google/genomics/v1alpha2/pipelines.proto
@@ -0,0 +1,614 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.genomics.v1alpha2;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/code.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/genomics/v1alpha2;genomics";
+option java_multiple_files = true;
+option java_outer_classname = "PipelinesProto";
+option java_package = "com.google.genomics.v1a";
+
+
+// A service for running genomics pipelines.
+service PipelinesV1Alpha2 {
+ // Creates a pipeline that can be run later. Create takes a Pipeline that
+ // has all fields other than `pipelineId` populated, and then returns
+ // the same pipeline with `pipelineId` populated. This id can be used
+ // to run the pipeline.
+ //
+ // Caller must have WRITE permission to the project.
+ rpc CreatePipeline(CreatePipelineRequest) returns (Pipeline) {
+ option (google.api.http) = { post: "/v1alpha2/pipelines" body: "pipeline" };
+ }
+
+ // Runs a pipeline. If `pipelineId` is specified in the request, then
+ // run a saved pipeline. If `ephemeralPipeline` is specified, then run
+ // that pipeline once without saving a copy.
+ //
+ // The caller must have READ permission to the project where the pipeline
+ // is stored and WRITE permission to the project where the pipeline will be
+ // run, as VMs will be created and storage will be used.
+ rpc RunPipeline(RunPipelineRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1alpha2/pipelines:run" body: "*" };
+ }
+
+ // Retrieves a pipeline based on ID.
+ //
+ // Caller must have READ permission to the project.
+ rpc GetPipeline(GetPipelineRequest) returns (Pipeline) {
+ option (google.api.http) = { get: "/v1alpha2/pipelines/{pipeline_id}" };
+ }
+
+ // Lists pipelines.
+ //
+ // Caller must have READ permission to the project.
+ rpc ListPipelines(ListPipelinesRequest) returns (ListPipelinesResponse) {
+ option (google.api.http) = { get: "/v1alpha2/pipelines" };
+ }
+
+ // Deletes a pipeline based on ID.
+ //
+ // Caller must have WRITE permission to the project.
+ rpc DeletePipeline(DeletePipelineRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1alpha2/pipelines/{pipeline_id}" };
+ }
+
+ // Gets controller configuration information. Should only be called
+ // by VMs created by the Pipelines Service and not by end users.
+ rpc GetControllerConfig(GetControllerConfigRequest) returns (ControllerConfig) {
+ option (google.api.http) = { get: "/v1alpha2/pipelines:getControllerConfig" };
+ }
+
+ // Sets status of a given operation. Any new timestamps (as determined by
+ // description) are appended to TimestampEvents. Should only be called by VMs
+ // created by the Pipelines Service and not by end users.
+ rpc SetOperationStatus(SetOperationStatusRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { put: "/v1alpha2/pipelines:setOperationStatus" body: "*" };
+ }
+}
+
+// Describes a Compute Engine resource that is being managed by a running
+// [pipeline][google.genomics.v1alpha2.Pipeline].
+message ComputeEngine {
+ // The instance on which the operation is running.
+ string instance_name = 1;
+
+ // The availability zone in which the instance resides.
+ string zone = 2;
+
+ // The machine type of the instance.
+ string machine_type = 3;
+
+ // The names of the disks that were created for this pipeline.
+ repeated string disk_names = 4;
+}
+
+// Runtime metadata that will be populated in the
+// [runtimeMetadata][google.genomics.v1.OperationMetadata.runtime_metadata]
+// field of the Operation associated with a RunPipeline execution.
+message RuntimeMetadata {
+ // Execution information specific to Google Compute Engine.
+ ComputeEngine compute_engine = 1;
+}
+
+// The pipeline object. Represents a transformation from a set of input
+// parameters to a set of output parameters. The transformation is defined
+// as a docker image and command to run within that image. Each pipeline
+// is run on a Google Compute Engine VM. A pipeline can be created with the
+// `create` method and then later run with the `run` method, or a pipeline can
+// be defined and run all at once with the `run` method.
+message Pipeline {
+ // Required. The project in which to create the pipeline. The caller must have
+ // WRITE access.
+ string project_id = 1;
+
+ // Required. A user-specified pipeline name that does not have to be unique.
+ // This name can be used for filtering Pipelines in ListPipelines.
+ string name = 2;
+
+ // User-specified description.
+ string description = 3;
+
+ // Input parameters of the pipeline.
+ repeated PipelineParameter input_parameters = 8;
+
+ // Output parameters of the pipeline.
+ repeated PipelineParameter output_parameters = 9;
+
+ // Required. The executor indicates in which environment the pipeline runs.
+ oneof executor {
+ // Specifies the docker run information.
+ DockerExecutor docker = 5;
+ }
+
+ // Required. Specifies resource requirements for the pipeline run.
+ // Required fields:
+ //
+ // *
+ // [minimumCpuCores][google.genomics.v1alpha2.PipelineResources.minimum_cpu_cores]
+ //
+ // *
+ // [minimumRamGb][google.genomics.v1alpha2.PipelineResources.minimum_ram_gb]
+ PipelineResources resources = 6;
+
+ // Unique pipeline id that is generated by the service when CreatePipeline
+ // is called. Cannot be specified in the Pipeline used in the
+ // CreatePipelineRequest, and will be populated in the response to
+ // CreatePipeline and all subsequent Get and List calls. Indicates that the
+ // service has registered this pipeline.
+ string pipeline_id = 7;
+}
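+
+// As an illustrative sketch (the project ID, image, command, and parameter
+// names are hypothetical placeholders), a minimal `Pipeline` in protobuf text
+// format that copies one file in from Google Cloud Storage, counts its lines
+// inside the container, and copies the result back out:
+//
+// ```
+// project_id: "my-project"
+// name: "line-count"
+// input_parameters {
+//   name: "input_file"
+//   local_copy { path: "input.txt" disk: "data" }
+// }
+// output_parameters {
+//   name: "output_file"
+//   local_copy { path: "count.txt" disk: "data" }
+// }
+// docker {
+//   image_name: "ubuntu"
+//   cmd: "wc -l < /mnt/data/input.txt > /mnt/data/count.txt"
+// }
+// resources {
+//   minimum_cpu_cores: 1
+//   minimum_ram_gb: 3.75
+//   disks { name: "data" type: PERSISTENT_HDD mount_point: "/mnt/data" }
+// }
+// ```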
+
+// The request to create a pipeline. The pipeline field here should not have
+// `pipelineId` populated, as that will be populated by the server.
+message CreatePipelineRequest {
+ // The pipeline to create. Should not have `pipelineId` populated.
+ Pipeline pipeline = 1;
+}
+
+// The pipeline run arguments.
+message RunPipelineArgs {
+ // Required. The project in which to run the pipeline. The caller must have
+ // WRITER access to all Google Cloud services and resources (e.g. Google
+ // Compute Engine) that will be used.
+ string project_id = 1;
+
+ // Pipeline input arguments; keys are defined in the pipeline documentation.
+ // All input parameters that do not have default values must be specified.
+ // If parameters with defaults are specified here, the defaults will be
+ // overridden.
+ map<string, string> inputs = 2;
+
+ // Pipeline output arguments; keys are defined in the pipeline
+ // documentation. All output parameters without default values
+ // must be specified. If parameters with defaults are specified
+ // here, the defaults will be overridden.
+ map<string, string> outputs = 3;
+
+ // The Google Cloud Service Account that will be used to access data and
+ // services. By default, the compute service account associated with
+ // `projectId` is used.
+ ServiceAccount service_account = 4;
+
+ // This field is deprecated. Use `labels` instead. Client-specified pipeline
+ // operation identifier.
+ string client_id = 5;
+
+ // Specifies resource requirements/overrides for the pipeline run.
+ PipelineResources resources = 6;
+
+ // Required. Logging options. Used by the service to communicate results
+ // to the user.
+ LoggingOptions logging = 7;
+
+ // How long to keep the VM up after a failure (for example, the docker command
+ // failed or copying input or output files failed). While the VM is up, one
+ // can ssh into it to debug. Default is 0; the maximum allowed value is 1 day.
+ google.protobuf.Duration keep_vm_alive_on_failure_duration = 8;
+
+ // Labels to apply to this pipeline run. Labels will also be applied to
+ // compute resources (VM, disks) created by this pipeline run. When listing
+ // operations, operations can be [filtered by labels]
+ // [google.longrunning.ListOperationsRequest.filter].
+ // Label keys may not be empty; label values may be empty. Non-empty labels
+ // must be 1-63 characters long, and comply with [RFC1035]
+ // (https://www.ietf.org/rfc/rfc1035.txt).
+ // Specifically, the name must be 1-63 characters long and match the regular
+ // expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
+ // character must be a lowercase letter, and all following characters must be
+ // a dash, lowercase letter, or digit, except the last character, which cannot
+ // be a dash.
+ map<string, string> labels = 9;
+}
+
+// The request to run a pipeline. If `pipelineId` is specified, it
+// refers to a saved pipeline created with CreatePipeline and set as
+// the `pipelineId` of the returned Pipeline object. If
+// `ephemeralPipeline` is specified, that pipeline is run once
+// with the given args and not saved. It is an error to specify both
+// `pipelineId` and `ephemeralPipeline`. `pipelineArgs`
+// must be specified.
+message RunPipelineRequest {
+ oneof pipeline {
+ // The already created pipeline to run.
+ string pipeline_id = 1;
+
+ // A new pipeline object to run once and then delete.
+ Pipeline ephemeral_pipeline = 2;
+ }
+
+ // The arguments to use when running this pipeline.
+ RunPipelineArgs pipeline_args = 3;
+}
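+
+// As an illustrative sketch, a `RunPipelineRequest` in protobuf text format
+// that runs a previously created pipeline (the pipeline ID, project ID, and
+// Cloud Storage paths are hypothetical placeholders):
+//
+// ```
+// pipeline_id: "pipeline-123"
+// pipeline_args {
+//   project_id: "my-project"
+//   inputs { key: "input_file" value: "gs://my-bucket/input.txt" }
+//   outputs { key: "output_file" value: "gs://my-bucket/count.txt" }
+//   logging { gcs_path: "gs://my-bucket/logs/" }
+// }
+// ```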
+
+// A request to get a saved pipeline by id.
+message GetPipelineRequest {
+ // Caller must have READ access to the project in which this pipeline
+ // is defined.
+ string pipeline_id = 1;
+}
+
+// A request to list pipelines in a given project. Pipelines can be
+// filtered by name using `namePrefix`: all pipelines with names that
+// begin with `namePrefix` will be returned. Uses standard pagination:
+// `pageSize` indicates how many pipelines to return, and
+// `pageToken` comes from a previous ListPipelinesResponse to
+// indicate offset.
+message ListPipelinesRequest {
+ // Required. The name of the project to search for pipelines. Caller
+ // must have READ access to this project.
+ string project_id = 1;
+
+ // Pipelines with names that match this prefix should be
+ // returned. If unspecified, all pipelines in the project, up to
+ // `pageSize`, will be returned.
+ string name_prefix = 2;
+
+ // Number of pipelines to return at once. Defaults to 256, and max
+ // is 2048.
+ int32 page_size = 3;
+
+ // Token to use to indicate where to start getting results.
+ // If unspecified, returns the first page of results.
+ string page_token = 4;
+}
+
+// The response of ListPipelines. Contains at most `pageSize`
+// pipelines. If it contains `pageSize` pipelines, and more pipelines
+// exist, then `nextPageToken` will be populated and should be
+// used as the `pageToken` argument to a subsequent ListPipelines
+// request.
+message ListPipelinesResponse {
+ // The matched pipelines.
+ repeated Pipeline pipelines = 1;
+
+ // The token to use to get the next page of results.
+ string next_page_token = 2;
+}
+
+// The request to delete a saved pipeline by ID.
+message DeletePipelineRequest {
+ // Caller must have WRITE access to the project in which this pipeline
+ // is defined.
+ string pipeline_id = 1;
+}
+
+// Request to get controller configuration. Should only be used
+// by VMs created by the Pipelines Service and not by end users.
+message GetControllerConfigRequest {
+ // The operation to retrieve controller configuration for.
+ string operation_id = 1;
+
+ uint64 validation_token = 2;
+}
+
+// Stores the information that the controller will fetch from the
+// server in order to run. Should only be used by VMs created by the
+// Pipelines Service and not by end users.
+message ControllerConfig {
+ message RepeatedString {
+ repeated string values = 1;
+ }
+
+ string image = 1;
+
+ string cmd = 2;
+
+ string gcs_log_path = 3;
+
+ string machine_type = 4;
+
+ map<string, string> vars = 5;
+
+ map<string, string> disks = 6;
+
+ map<string, RepeatedString> gcs_sources = 7;
+
+ map<string, RepeatedString> gcs_sinks = 8;
+}
+
+// Stores the major events in job execution and the times at which they
+// occurred.
+message TimestampEvent {
+ // String indicating the type of event.
+ string description = 1;
+
+ // The time this event occurred.
+ google.protobuf.Timestamp timestamp = 2;
+}
+
+// Request to set operation status. Should only be used by VMs
+// created by the Pipelines Service and not by end users.
+message SetOperationStatusRequest {
+ string operation_id = 1;
+
+ repeated TimestampEvent timestamp_events = 2;
+
+ google.rpc.Code error_code = 3;
+
+ string error_message = 4;
+
+ uint64 validation_token = 5;
+}
+
+// A Google Cloud Service Account.
+message ServiceAccount {
+ // Email address of the service account. Defaults to `default`,
+ // which uses the compute service account associated with the project.
+ string email = 1;
+
+ // List of scopes to be enabled for this service account on the VM.
+ // The following scopes are automatically included:
+ //
+ // * https://www.googleapis.com/auth/compute
+ // * https://www.googleapis.com/auth/devstorage.full_control
+ // * https://www.googleapis.com/auth/genomics
+ // * https://www.googleapis.com/auth/logging.write
+ // * https://www.googleapis.com/auth/monitoring.write
+ repeated string scopes = 2;
+}
+
+// The logging options for the pipeline run.
+message LoggingOptions {
+ // The location in Google Cloud Storage to which the pipeline logs
+ // will be copied. Can be specified as a fully qualified directory
+ // path, in which case logs will be output with a unique identifier
+ // as the filename in that directory, or as a fully specified path,
+ // which must end in `.log`, in which case that path will be
+ // used, and the user must ensure that logs are not
+ // overwritten. Stdout and stderr logs from the run are also
+ // generated and output as `-stdout.log` and `-stderr.log`.
+ string gcs_path = 1;
+}
+
+// The system resources for the pipeline run.
+message PipelineResources {
+ // A Google Compute Engine disk resource specification.
+ message Disk {
+ // The types of disks that may be attached to VMs.
+ enum Type {
+ // Default disk type. Use one of the other options below.
+ TYPE_UNSPECIFIED = 0;
+
+ // Specifies a Google Compute Engine persistent hard disk. See
+ // https://cloud.google.com/compute/docs/disks/#pdspecs for details.
+ PERSISTENT_HDD = 1;
+
+ // Specifies a Google Compute Engine persistent solid-state disk. See
+ // https://cloud.google.com/compute/docs/disks/#pdspecs for details.
+ PERSISTENT_SSD = 2;
+
+ // Specifies a Google Compute Engine local SSD.
+ // See https://cloud.google.com/compute/docs/disks/local-ssd for details.
+ LOCAL_SSD = 3;
+ }
+
+ // Required. The name of the disk that can be used in the pipeline
+ // parameters. Must be 1 - 63 characters.
+ // The name "boot" is reserved for system use.
+ string name = 1;
+
+ // Required. The type of the disk to create.
+ Type type = 2;
+
+ // The size of the disk. Defaults to 500 (GB).
+ // This field is not applicable for local SSD.
+ int32 size_gb = 3;
+
+ // The full or partial URL of the persistent disk to attach. See
+ // https://cloud.google.com/compute/docs/reference/latest/instances#resource
+ // and
+ // https://cloud.google.com/compute/docs/disks/persistent-disks#snapshots
+ // for more details.
+ string source = 4;
+
+ // Deprecated. Disks created by the Pipelines API will be deleted at the end
+ // of the pipeline run, regardless of what this field is set to.
+ bool auto_delete = 6;
+
+ // Required at create time and cannot be overridden at run time.
+ // Specifies the path in the docker container where files on
+ // this disk should be located. For example, if `mountPoint`
+ // is `/mnt/disk`, and the parameter has `localPath`
+ // `inputs/file.txt`, the docker container can access the data at
+ // `/mnt/disk/inputs/file.txt`.
+ string mount_point = 8;
+ }
+
+ // The minimum number of cores to use. Defaults to 1.
+ int32 minimum_cpu_cores = 1;
+
+ // Whether to use preemptible VMs. Defaults to `false`. To use preemptible
+ // VMs, this must be true at both create time and run time; it cannot be true
+ // at run time if it was false at create time.
+ bool preemptible = 2;
+
+ // The minimum amount of RAM to use. Defaults to 3.75 (GB).
+ double minimum_ram_gb = 3;
+
+ // Disks to attach.
+ repeated Disk disks = 4;
+
+ // List of Google Compute Engine availability zones to which resource
+ // creation will be restricted. If empty, any zone may be chosen.
+ repeated string zones = 5;
+
+ // The size of the boot disk. Defaults to 10 (GB).
+ int32 boot_disk_size_gb = 6;
+
+ // Whether to assign an external IP to the instance. This is an experimental
+ // feature that may go away. Defaults to false.
+ // Corresponds to `--no_address` flag for [gcloud compute instances create]
+ // (https://cloud.google.com/sdk/gcloud/reference/compute/instances/create).
+ // To use this feature, it must be true at both create time and run time; it
+ // cannot be true at run time if it was false at create time. If you need to
+ // ssh into a private IP VM for debugging, you can ssh to a public VM and then
+ // ssh into the private VM's internal IP. If noAddress is set, this pipeline run may
+ // only load docker images from Google Container Registry and not Docker Hub.
+ // ** Note: To use this option, your project must be in Google Access for
+ // Private IPs Early Access Program.**
+ bool no_address = 7;
+}
+
+// Parameters facilitate setting and delivering data into the
+// pipeline's execution environment. They are defined at create time,
+// with optional defaults, and can be overridden at run time.
+//
+// If `localCopy` is unset, then the parameter specifies a string that
+// is passed as-is into the pipeline, as the value of the environment
+// variable with the given name. A default value can be optionally
+// specified at create time. The default can be overridden at run time
+// using the inputs map. If no default is given, a value must be
+// supplied at runtime.
+//
+// If `localCopy` is defined, then the parameter specifies a data
+// source or sink, both in Google Cloud Storage and on the Docker container
+// where the pipeline computation is run. The [service account associated with
+// the Pipeline][google.genomics.v1alpha2.RunPipelineArgs.service_account] (by
+// default the project's Compute Engine service account) must have access to the
+// Google Cloud Storage paths.
+//
+// At run time, the Google Cloud Storage paths can be overridden if a default
+// was provided at create time, or must be set otherwise. The pipeline runner
+// should add a key/value pair to either the inputs or outputs map. The
+// indicated data copies will be carried out before/after pipeline execution,
+// just as if the corresponding arguments were provided to `gsutil cp`.
+//
+// For example: Given the following `PipelineParameter`, specified
+// in the `inputParameters` list:
+//
+// ```
+// {name: "input_file", localCopy: {path: "file.txt", disk: "pd1"}}
+// ```
+//
+// where `disk` is defined in the `PipelineResources` object as:
+//
+// ```
+// {name: "pd1", mountPoint: "/mnt/disk/"}
+// ```
+//
+// We create a disk named `pd1`, mount it on the host VM, and map
+// `/mnt/pd1` to `/mnt/disk` in the docker container. At
+// runtime, an entry for `input_file` would be required in the inputs
+// map, such as:
+//
+// ```
+// inputs["input_file"] = "gs://my-bucket/bar.txt"
+// ```
+//
+// This would generate the following gsutil call:
+//
+// ```
+// gsutil cp gs://my-bucket/bar.txt /mnt/pd1/file.txt
+// ```
+//
+// The file `/mnt/pd1/file.txt` maps to `/mnt/disk/file.txt` in the
+// Docker container. Acceptable paths are:
+//
+// <table>
+// <thead>
+// <tr><th>Google Cloud storage path</th><th>Local path</th></tr>
+// </thead>
+// <tbody>
+// <tr><td>file</td><td>file</td></tr>
+// <tr><td>glob</td><td>directory</td></tr>
+// </tbody>
+// </table>
+//
+// For outputs, the direction of the copy is reversed:
+//
+// ```
+// gsutil cp /mnt/disk/file.txt gs://my-bucket/bar.txt
+// ```
+//
+// Acceptable paths are:
+//
+// <table>
+// <thead>
+// <tr><th>Local path</th><th>Google Cloud Storage path</th></tr>
+// </thead>
+// <tbody>
+// <tr><td>file</td><td>file</td></tr>
+// <tr>
+// <td>file</td>
+// <td>directory - directory must already exist</td>
+// </tr>
+// <tr>
+// <td>glob</td>
+// <td>directory - directory will be created if it doesn't exist</td></tr>
+// </tbody>
+// </table>
+//
+// One restriction due to docker limitations is that for outputs that are found
+// on the boot disk, the local path cannot be a glob and must be a file.
+message PipelineParameter {
+ // LocalCopy defines how a remote file should be copied to and from the VM.
+ message LocalCopy {
+ // Required. The path within the user's docker container where
+ // this input should be localized to and from, relative to the specified
+ // disk's mount point. For example: file.txt.
+ string path = 1;
+
+ // Required. The name of the disk where this parameter is
+ // located. Can be the name of one of the disks specified in the
+ // Resources field, or "boot", which represents the Docker
+ // instance's boot disk and has a mount point of `/`.
+ string disk = 2;
+ }
+
+ // Required. Name of the parameter - the pipeline runner uses this string
+ // as the key to the input and output maps in RunPipeline.
+ string name = 1;
+
+ // Human-readable description.
+ string description = 2;
+
+ // The default value for this parameter. Can be overridden at runtime.
+ // If `localCopy` is present, then this must be a Google Cloud Storage path
+ // beginning with `gs://`.
+ string default_value = 5;
+
+ // If present, this parameter is marked for copying to and from the VM.
+ // `LocalCopy` indicates where on the VM the file should be. The value
+ // given to this parameter (either at runtime or using `defaultValue`)
+ // must be the remote path where the file should be.
+ LocalCopy local_copy = 6;
+}
+
+// The Docker executor specification.
+message DockerExecutor {
+ // Required. Image name from either Docker Hub or Google Container Registry.
+ // Users that run pipelines must have READ access to the image.
+ string image_name = 1;
+
+ // Required. The command or newline delimited script to run. The command
+ // string will be executed within a bash shell.
+ //
+ // If the command exits with a non-zero exit code, output parameter
+ // de-localization will be skipped and the pipeline operation's
+ // [`error`][google.longrunning.Operation.error] field will be populated.
+ //
+ // Maximum command string length is 16384.
+ string cmd = 2;
+}
diff --git a/third_party/googleapis/google/iam/README.md b/third_party/googleapis/google/iam/README.md
new file mode 100644
index 0000000000..68e7fa0138
--- /dev/null
+++ b/third_party/googleapis/google/iam/README.md
@@ -0,0 +1,35 @@
+# Google Identity and Access Management (IAM) API
+
+Documentation of the access control API that will be implemented by all
+first-party services provided by the Google Cloud Platform (such as Cloud
+Storage, Compute Engine, and App Engine).
+
+Any implementation of an API that offers access control features
+will implement the google.iam.v1.IAMPolicy interface.
+
+## Data model
+
+Access control is applied when a principal (user or service account) takes
+some action on a resource exposed by a service. Resources, identified by
+URI-like names, are the unit of access control specification. It is up to
+the service implementations to choose what granularity of access control to
+support and what set of actions (permissions) to support for the resources
+they provide. For example, one database service may allow access control to be
+specified only at the Table level, whereas another might allow access control
+to also be specified at the Column level.
+
+This is intentionally not a CRUD style API because access control policies
+are created and deleted implicitly with the resources to which they are
+attached.
+
+## Policy
+
+A `Policy` consists of a list of bindings. A `Binding` binds a set of members
+to a role, where the members can include user accounts, user groups, user
+domains, and service accounts. A role is a named set of permissions, defined
+by the IAM system. The definition of a role is outside the policy.
+
+A permission check involves determining the roles that include the specified
+permission, and then determining if the principal specified by the check is a
+member of a binding to at least one of these roles. The membership check is
+recursive when a group is bound to a role. \ No newline at end of file
diff --git a/third_party/googleapis/google/iam/admin/v1/iam.proto b/third_party/googleapis/google/iam/admin/v1/iam.proto
new file mode 100644
index 0000000000..2e2fe2b72f
--- /dev/null
+++ b/third_party/googleapis/google/iam/admin/v1/iam.proto
@@ -0,0 +1,469 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.iam.admin.v1;
+
+import "google/api/annotations.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/iam/admin/v1;admin";
+option java_multiple_files = true;
+option java_outer_classname = "IamProto";
+option java_package = "com.google.iam.admin.v1";
+
+
+// Creates and manages service account objects.
+//
+// A service account is an account that belongs to your project instead
+// of to an individual end user. It is used to authenticate calls
+// to a Google API.
+//
+// To create a service account, specify the `project_id` and `account_id`
+// for the account. The `account_id` is unique within the project, and is used
+// to generate the service account email address and a stable
+// `unique_id`.
+//
+// All other methods can identify accounts using the format
+// `projects/{project}/serviceAccounts/{account}`.
+// Using `-` as a wildcard for the project will infer the project from
+// the account. The `account` value can be the `email` address or the
+// `unique_id` of the service account.
+service IAM {
+ // Lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project.
+ rpc ListServiceAccounts(ListServiceAccountsRequest) returns (ListServiceAccountsResponse) {
+ option (google.api.http) = { get: "/v1/{name=projects/*}/serviceAccounts" };
+ }
+
+ // Gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
+ rpc GetServiceAccount(GetServiceAccountRequest) returns (ServiceAccount) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/serviceAccounts/*}" };
+ }
+
+ // Creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]
+ // and returns it.
+ rpc CreateServiceAccount(CreateServiceAccountRequest) returns (ServiceAccount) {
+ option (google.api.http) = { post: "/v1/{name=projects/*}/serviceAccounts" body: "*" };
+ }
+
+ // Updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
+ //
+ // Currently, only the following fields are updatable: `display_name`.
+ // The `etag` is mandatory.
+ rpc UpdateServiceAccount(ServiceAccount) returns (ServiceAccount) {
+ option (google.api.http) = { put: "/v1/{name=projects/*/serviceAccounts/*}" body: "*" };
+ }
+
+ // Deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
+ rpc DeleteServiceAccount(DeleteServiceAccountRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/serviceAccounts/*}" };
+ }
+
+ // Lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey].
+ rpc ListServiceAccountKeys(ListServiceAccountKeysRequest) returns (ListServiceAccountKeysResponse) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/serviceAccounts/*}/keys" };
+ }
+
+ // Gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]
+ // by key id.
+ rpc GetServiceAccountKey(GetServiceAccountKeyRequest) returns (ServiceAccountKey) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/serviceAccounts/*/keys/*}" };
+ }
+
+ // Creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]
+ // and returns it.
+ rpc CreateServiceAccountKey(CreateServiceAccountKeyRequest) returns (ServiceAccountKey) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/serviceAccounts/*}/keys" body: "*" };
+ }
+
+ // Deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey].
+ rpc DeleteServiceAccountKey(DeleteServiceAccountKeyRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/serviceAccounts/*/keys/*}" };
+ }
+
+ // Signs a blob using a service account's system-managed private key.
+ rpc SignBlob(SignBlobRequest) returns (SignBlobResponse) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/serviceAccounts/*}:signBlob" body: "*" };
+ }
+
+ // Returns the IAM access control policy for a
+ // [ServiceAccount][google.iam.admin.v1.ServiceAccount].
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/serviceAccounts/*}:getIamPolicy" body: "" };
+ }
+
+ // Sets the IAM access control policy for a
+ // [ServiceAccount][google.iam.admin.v1.ServiceAccount].
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/serviceAccounts/*}:setIamPolicy" body: "*" };
+ }
+
+ // Tests the specified permissions against the IAM access control policy
+ // for a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/serviceAccounts/*}:testIamPermissions" body: "*" };
+ }
+
+ // Queries roles that can be granted on a particular resource.
+ // A role is grantable if it can be used as the role in a binding for a policy
+ // for that resource.
+ rpc QueryGrantableRoles(QueryGrantableRolesRequest) returns (QueryGrantableRolesResponse) {
+ option (google.api.http) = { post: "/v1/roles:queryGrantableRoles" body: "*" };
+ }
+}
+
+// A service account in the Identity and Access Management API.
+//
+// To create a service account, specify the `project_id` and the `account_id`
+// for the account. The `account_id` is unique within the project, and is used
+// to generate the service account email address and a stable
+// `unique_id`.
+//
+// If the account already exists, the account's resource name is returned
+// in util::Status's ResourceInfo.resource_name in the format of
+// projects/{project}/serviceAccounts/{email}. The caller can use the name in
+// other methods to access the account.
+//
+// All other methods can identify the service account using the format
+// `projects/{project}/serviceAccounts/{account}`.
+// Using `-` as a wildcard for the project will infer the project from
+// the account. The `account` value can be the `email` address or the
+// `unique_id` of the service account.
+message ServiceAccount {
+ // The resource name of the service account in the following format:
+ // `projects/{project}/serviceAccounts/{account}`.
+ //
+ // Requests using `-` as a wildcard for the project will infer the project
+ // from the `account` and the `account` value can be the `email` address or
+ // the `unique_id` of the service account.
+ //
+ // In responses the resource name will always be in the format
+ // `projects/{project}/serviceAccounts/{email}`.
+ string name = 1;
+
+ // @OutputOnly The id of the project that owns the service account.
+ string project_id = 2;
+
+ // @OutputOnly The unique and stable id of the service account.
+ string unique_id = 4;
+
+ // @OutputOnly The email address of the service account.
+ string email = 5;
+
+ // Optional. A user-specified description of the service account. Must be
+ // fewer than 100 UTF-8 bytes.
+ string display_name = 6;
+
+ // Used to perform a consistent read-modify-write.
+ bytes etag = 7;
+
+ // @OutputOnly. The OAuth2 client id for the service account.
+ // This is used in conjunction with the OAuth2 clientconfig API to make
+ // three legged OAuth2 (3LO) flows to access the data of Google users.
+ string oauth2_client_id = 9;
+}
+
+// The service account create request.
+message CreateServiceAccountRequest {
+ // Required. The resource name of the project associated with the service
+ // accounts, such as `projects/my-project-123`.
+ string name = 1;
+
+ // Required. The account id that is used to generate the service account
+ // email address and a stable unique id. It is unique within a project,
+ // must be 6-30 characters long, and match the regular expression
+ // `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.
+ string account_id = 2;
+
+ // The [ServiceAccount][google.iam.admin.v1.ServiceAccount] resource to create.
+ // Currently, only the following values are user-assignable: `display_name`.
+ ServiceAccount service_account = 3;
+}
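+
+// As an illustrative sketch, a `CreateServiceAccountRequest` in protobuf text
+// format (the project and account IDs are hypothetical placeholders):
+//
+// ```
+// name: "projects/my-project-123"
+// account_id: "my-service-account"
+// service_account { display_name: "My service account" }
+// ```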
+
+// The service account list request.
+message ListServiceAccountsRequest {
+ // Required. The resource name of the project associated with the service
+ // accounts, such as `projects/my-project-123`.
+ string name = 1;
+
+ // Optional limit on the number of service accounts to include in the
+ // response. Further accounts can subsequently be obtained by including the
+ // [ListServiceAccountsResponse.next_page_token][google.iam.admin.v1.ListServiceAccountsResponse.next_page_token]
+ // in a subsequent request.
+ int32 page_size = 2;
+
+ // Optional pagination token returned in an earlier
+ // [ListServiceAccountsResponse.next_page_token][google.iam.admin.v1.ListServiceAccountsResponse.next_page_token].
+ string page_token = 3;
+}
+
+// The service account list response.
+message ListServiceAccountsResponse {
+ // The list of matching service accounts.
+ repeated ServiceAccount accounts = 1;
+
+ // To retrieve the next page of results, set
+ // [ListServiceAccountsRequest.page_token][google.iam.admin.v1.ListServiceAccountsRequest.page_token]
+ // to this value.
+ string next_page_token = 2;
+}
+
+// The service account get request.
+message GetServiceAccountRequest {
+ // The resource name of the service account in the following format:
+ // `projects/{project}/serviceAccounts/{account}`.
+ // Using `-` as a wildcard for the project will infer the project from
+ // the account. The `account` value can be the `email` address or the
+ // `unique_id` of the service account.
+ string name = 1;
+}
+
+// The service account delete request.
+message DeleteServiceAccountRequest {
+ // The resource name of the service account in the following format:
+ // `projects/{project}/serviceAccounts/{account}`.
+ // Using `-` as a wildcard for the project will infer the project from
+ // the account. The `account` value can be the `email` address or the
+ // `unique_id` of the service account.
+ string name = 1;
+}
+
+// The service account keys list request.
+message ListServiceAccountKeysRequest {
+ // `KeyType` filters to selectively retrieve certain varieties
+ // of keys.
+ enum KeyType {
+ // Unspecified key type. The presence of this in the
+ // message will immediately result in an error.
+ KEY_TYPE_UNSPECIFIED = 0;
+
+ // User-managed keys (managed and rotated by the user).
+ USER_MANAGED = 1;
+
+ // System-managed keys (managed and rotated by Google).
+ SYSTEM_MANAGED = 2;
+ }
+
+ // The resource name of the service account in the following format:
+ // `projects/{project}/serviceAccounts/{account}`.
+ //
+ // Using `-` as a wildcard for the project will infer the project from
+ // the account. The `account` value can be the `email` address or the
+ // `unique_id` of the service account.
+ string name = 1;
+
+ // Filters the types of keys the user wants to include in the list
+ // response. Duplicate key types are not allowed. If no key type
+ // is provided, all keys are returned.
+ repeated KeyType key_types = 2;
+}
+
+// The service account keys list response.
+message ListServiceAccountKeysResponse {
+ // The public keys for the service account.
+ repeated ServiceAccountKey keys = 1;
+}
+
+// The service account key get by id request.
+message GetServiceAccountKeyRequest {
+ // The resource name of the service account key in the following format:
+ // `projects/{project}/serviceAccounts/{account}/keys/{key}`.
+ //
+ // Using `-` as a wildcard for the project will infer the project from
+ // the account. The `account` value can be the `email` address or the
+ // `unique_id` of the service account.
+ string name = 1;
+
+ // The output format of the public key requested.
+ // X509_PEM is the default output format.
+ ServiceAccountPublicKeyType public_key_type = 2;
+}
+
+// Represents a service account key.
+//
+// A service account has two sets of key-pairs: user-managed, and
+// system-managed.
+//
+// User-managed key-pairs can be created and deleted by users. Users are
+// responsible for rotating these keys periodically to ensure security of
+// their service accounts. Users retain the private key of these key-pairs,
+// and Google retains ONLY the public key.
+//
+// System-managed key-pairs are managed automatically by Google, and rotated
+// daily without user intervention. The private key never leaves Google's
+// servers to maximize security.
+//
+// Public keys for all service accounts are also published at the OAuth2
+// Service Account API.
+message ServiceAccountKey {
+ // The resource name of the service account key in the following format:
+ // `projects/{project}/serviceAccounts/{account}/keys/{key}`.
+ string name = 1;
+
+ // The output format for the private key.
+ // Only provided in `CreateServiceAccountKey` responses, not
+ // in `GetServiceAccountKey` or `ListServiceAccountKeys` responses.
+ //
+ // Google never exposes system-managed private keys, and never retains
+ // user-managed private keys.
+ ServiceAccountPrivateKeyType private_key_type = 2;
+
+ // Specifies the algorithm (and possibly key size) for the key.
+ ServiceAccountKeyAlgorithm key_algorithm = 8;
+
+ // The private key data. Only provided in `CreateServiceAccountKey`
+ // responses.
+ bytes private_key_data = 3;
+
+ // The public key data. Only provided in `GetServiceAccountKey` responses.
+ bytes public_key_data = 7;
+
+ // The key can be used after this timestamp.
+ google.protobuf.Timestamp valid_after_time = 4;
+
+ // The key can be used before this timestamp.
+ google.protobuf.Timestamp valid_before_time = 5;
+}
+
+// The service account key create request.
+message CreateServiceAccountKeyRequest {
+ // The resource name of the service account in the following format:
+ // `projects/{project}/serviceAccounts/{account}`.
+ // Using `-` as a wildcard for the project will infer the project from
+ // the account. The `account` value can be the `email` address or the
+ // `unique_id` of the service account.
+ string name = 1;
+
+ // The output format of the private key. `GOOGLE_CREDENTIALS_FILE` is the
+ // default output format.
+ ServiceAccountPrivateKeyType private_key_type = 2;
+
+ // Which type of key and algorithm to use for the key.
+ // The default is currently a 4K RSA key. However, this may change in the
+ // future.
+ ServiceAccountKeyAlgorithm key_algorithm = 3;
+}
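+
+// As an illustrative sketch, a `CreateServiceAccountKeyRequest` in protobuf
+// text format that asks for a 2048-bit RSA key delivered as a Google
+// credentials file (the project and service account email are hypothetical
+// placeholders):
+//
+// ```
+// name: "projects/my-project-123/serviceAccounts/my-sa@my-project-123.iam.gserviceaccount.com"
+// private_key_type: TYPE_GOOGLE_CREDENTIALS_FILE
+// key_algorithm: KEY_ALG_RSA_2048
+// ```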
+
+// The service account key delete request.
+message DeleteServiceAccountKeyRequest {
+ // The resource name of the service account key in the following format:
+ // `projects/{project}/serviceAccounts/{account}/keys/{key}`.
+ // Using `-` as a wildcard for the project will infer the project from
+ // the account. The `account` value can be the `email` address or the
+ // `unique_id` of the service account.
+ string name = 1;
+}
+
+// The service account sign blob request.
+message SignBlobRequest {
+ // The resource name of the service account in the following format:
+ // `projects/{project}/serviceAccounts/{account}`.
+ // Using `-` as a wildcard for the project will infer the project from
+ // the account. The `account` value can be the `email` address or the
+ // `unique_id` of the service account.
+ string name = 1;
+
+ // The bytes to sign.
+ bytes bytes_to_sign = 2;
+}
+
+// The service account sign blob response.
+message SignBlobResponse {
+ // The id of the key used to sign the blob.
+ string key_id = 1;
+
+ // The signed blob.
+ bytes signature = 2;
+}
+
+// A role in the Identity and Access Management API.
+message Role {
+ // The name of the role.
+ //
+ // When Role is used in CreateRole, the role name must not be set.
+ //
+ // When Role is used in output and other input such as UpdateRole, the role
+ // name is the complete path, e.g., roles/logging.viewer for curated roles
+ // and organizations/{organization-id}/roles/logging.viewer for custom roles.
+ string name = 1;
+
+ // Optional. A human-readable title for the role. Typically this
+ // is limited to 100 UTF-8 bytes.
+ string title = 2;
+
+ // Optional. A human-readable description for the role.
+ string description = 3;
+}
+
+// The grantable role query request.
+message QueryGrantableRolesRequest {
+ // Required. The full resource name to query from the list of grantable roles.
+ //
+ // The name follows the Google Cloud Platform resource format.
+ // For example, a Cloud Platform project with id `my-project` will be named
+ // `//cloudresourcemanager.googleapis.com/projects/my-project`.
+ string full_resource_name = 1;
+}
+
+// The grantable role query response.
+message QueryGrantableRolesResponse {
+ // The list of matching roles.
+ repeated Role roles = 1;
+}
+
+// Supported key algorithms.
+enum ServiceAccountKeyAlgorithm {
+ // An unspecified key algorithm.
+ KEY_ALG_UNSPECIFIED = 0;
+
+ // 1k RSA Key.
+ KEY_ALG_RSA_1024 = 1;
+
+ // 2k RSA Key.
+ KEY_ALG_RSA_2048 = 2;
+}
+
+// Supported private key output formats.
+enum ServiceAccountPrivateKeyType {
+ // Unspecified. Equivalent to `TYPE_GOOGLE_CREDENTIALS_FILE`.
+ TYPE_UNSPECIFIED = 0;
+
+ // PKCS12 format.
+ // The password for the PKCS12 file is `notasecret`.
+ // For more information, see https://tools.ietf.org/html/rfc7292.
+ TYPE_PKCS12_FILE = 1;
+
+ // Google Credentials File format.
+ TYPE_GOOGLE_CREDENTIALS_FILE = 2;
+}
+
+// Supported public key output formats.
+enum ServiceAccountPublicKeyType {
+ // Unspecified. No public key will be returned.
+ TYPE_NONE = 0;
+
+ // X509 PEM format.
+ TYPE_X509_PEM_FILE = 1;
+
+ // Raw public key.
+ TYPE_RAW_PUBLIC_KEY = 2;
+}
diff --git a/third_party/googleapis/google/iam/admin/v1/iam_gapic.yaml b/third_party/googleapis/google/iam/admin/v1/iam_gapic.yaml
new file mode 100644
index 0000000000..c0ca1aa03a
--- /dev/null
+++ b/third_party/googleapis/google/iam/admin/v1/iam_gapic.yaml
@@ -0,0 +1,251 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.iam.admin.spi.v1
+ python:
+ package_name: google.cloud.gapic.iam_admin.v1
+ go:
+ package_name: cloud.google.com/go/iam/admin/apiv1
+ csharp:
+ package_name: Google.Iam.Admin.V1
+ ruby:
+ package_name: Google::Cloud::Iam::Admin::V1
+ php:
+ package_name: Google\Cloud\Iam\Admin\V1
+ nodejs:
+ package_name: iam.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.iam.admin.v1.IAM
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/serviceAccounts/{service_account}
+ entity_name: service_account
+ - name_pattern: projects/{project}/serviceAccounts/{service_account}/keys/{key}
+ entity_name: key
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: ListServiceAccounts
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: accounts
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ timeout_millis: 60000
+ - name: GetServiceAccount
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: service_account
+ timeout_millis: 60000
+ - name: CreateServiceAccount
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - account_id
+ - service_account
+ required_fields:
+ - name
+ - account_id
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ timeout_millis: 60000
+ - name: UpdateServiceAccount
+ required_fields:
+ - etag
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: service_account
+ timeout_millis: 60000
+ - name: DeleteServiceAccount
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: service_account
+ timeout_millis: 60000
+ - name: ListServiceAccountKeys
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - key_types
+ required_fields:
+ - name
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: service_account
+ timeout_millis: 60000
+ - name: GetServiceAccountKey
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - public_key_type
+ required_fields:
+ - name
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: key
+ timeout_millis: 60000
+ - name: CreateServiceAccountKey
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - private_key_type
+ - key_algorithm
+ required_fields:
+ - name
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: service_account
+ timeout_millis: 60000
+ - name: DeleteServiceAccountKey
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: key
+ timeout_millis: 60000
+ - name: SignBlob
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - bytes_to_sign
+ required_fields:
+ - name
+ - bytes_to_sign
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: service_account
+ timeout_millis: 60000
+ - name: GetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ required_fields:
+ - resource
+ request_object_method: false
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: service_account
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: PRIVATE
+ - name: SetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - policy
+ required_fields:
+ - resource
+ - policy
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: service_account
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: PRIVATE
+ - name: TestIamPermissions
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - permissions
+ required_fields:
+ - resource
+ - permissions
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: service_account
+ timeout_millis: 60000
+ - name: QueryGrantableRoles
+ flattening:
+ groups:
+ - parameters:
+ - full_resource_name
+ required_fields:
+ - full_resource_name
+ request_object_method: false
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
diff --git a/third_party/googleapis/google/iam/iam.yaml b/third_party/googleapis/google/iam/iam.yaml
new file mode 100644
index 0000000000..ccff586b8c
--- /dev/null
+++ b/third_party/googleapis/google/iam/iam.yaml
@@ -0,0 +1,21 @@
+# The IAM API Definition.
+
+type: google.api.Service
+config_version: 2
+name: iam.googleapis.com
+
+title: Google Identity and Access Management (IAM) API
+
+documentation:
+ summary:
+ Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.
+
+apis:
+- name: google.iam.admin.v1.IAM
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/iam,
+ https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/iam/v1/iam_policy.proto b/third_party/googleapis/google/iam/v1/iam_policy.proto
new file mode 100644
index 0000000000..15e216f194
--- /dev/null
+++ b/third_party/googleapis/google/iam/v1/iam_policy.proto
@@ -0,0 +1,118 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.iam.v1;
+
+import "google/api/annotations.proto";
+import "google/iam/v1/policy.proto";
+
+option cc_enable_arenas = true;
+option csharp_namespace = "Google.Cloud.Iam.V1";
+option go_package = "google.golang.org/genproto/googleapis/iam/v1;iam";
+option java_multiple_files = true;
+option java_outer_classname = "IamPolicyProto";
+option java_package = "com.google.iam.v1";
+
+
+// ## API Overview
+//
+// Manages Identity and Access Management (IAM) policies.
+//
+// Any implementation of an API that offers access control features
+// implements the google.iam.v1.IAMPolicy interface.
+//
+// ## Data model
+//
+// Access control is applied when a principal (user or service account) takes
+// some action on a resource exposed by a service. Resources, identified by
+// URI-like names, are the unit of access control specification. Service
+// implementations can choose the granularity of access control and the
+// supported permissions for their resources.
+// For example, one database service may allow access control to be
+// specified only at the Table level, whereas another might allow access control
+// to also be specified at the Column level.
+//
+// ## Policy Structure
+//
+// See google.iam.v1.Policy
+//
+// This is intentionally not a CRUD style API because access control policies
+// are created and deleted implicitly with the resources to which they are
+// attached.
+service IAMPolicy {
+ // Sets the access control policy on the specified resource. Replaces any
+ // existing policy.
+ rpc SetIamPolicy(SetIamPolicyRequest) returns (Policy) {
+ option (google.api.http) = { post: "/v1/{resource=**}:setIamPolicy" body: "*" };
+ }
+
+ // Gets the access control policy for a resource.
+ // Returns an empty policy if the resource exists and does not have a policy
+ // set.
+ rpc GetIamPolicy(GetIamPolicyRequest) returns (Policy) {
+ option (google.api.http) = { post: "/v1/{resource=**}:getIamPolicy" body: "*" };
+ }
+
+ // Returns permissions that a caller has on the specified resource.
+ // If the resource does not exist, this will return an empty set of
+ // permissions, not a NOT_FOUND error.
+ rpc TestIamPermissions(TestIamPermissionsRequest) returns (TestIamPermissionsResponse) {
+ option (google.api.http) = { post: "/v1/{resource=**}:testIamPermissions" body: "*" };
+ }
+}
+
+// Request message for `SetIamPolicy` method.
+message SetIamPolicyRequest {
+ // REQUIRED: The resource for which the policy is being specified.
+ // `resource` is usually specified as a path. For example, a Project
+ // resource is specified as `projects/{project}`.
+ string resource = 1;
+
+ // REQUIRED: The complete policy to be applied to the `resource`. The size of
+ // the policy is limited to a few tens of kilobytes. An empty policy is a
+ // valid policy, but certain Cloud Platform services (such as Projects)
+ // might reject it.
+ Policy policy = 2;
+}
+
+// Request message for `GetIamPolicy` method.
+message GetIamPolicyRequest {
+ // REQUIRED: The resource for which the policy is being requested.
+ // `resource` is usually specified as a path. For example, a Project
+ // resource is specified as `projects/{project}`.
+ string resource = 1;
+}
+
+// Request message for `TestIamPermissions` method.
+message TestIamPermissionsRequest {
+ // REQUIRED: The resource for which the policy detail is being requested.
+ // `resource` is usually specified as a path. For example, a Project
+ // resource is specified as `projects/{project}`.
+ string resource = 1;
+
+ // The set of permissions to check for the `resource`. Permissions with
+ // wildcards (such as '*' or 'storage.*') are not allowed. For more
+ // information see
+ // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
+ repeated string permissions = 2;
+}
+
+// Response message for `TestIamPermissions` method.
+message TestIamPermissionsResponse {
+ // A subset of `TestIamPermissionsRequest.permissions` that the caller is
+ // allowed to use.
+ repeated string permissions = 1;
+}
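The IAMPolicy service above is ordinary gRPC, so a generated stub can call it directly. A minimal sketch of TestIamPermissions, assuming these protos have been compiled with protoc's Python and gRPC plugins (the module names follow the usual codegen convention; the channel target, resource name, and permission string are illustrative):

    import grpc
    from google.iam.v1 import iam_policy_pb2, iam_policy_pb2_grpc

    # A real call needs an authenticated channel; the target here is a placeholder.
    channel = grpc.insecure_channel("localhost:8080")
    stub = iam_policy_pb2_grpc.IAMPolicyStub(channel)

    request = iam_policy_pb2.TestIamPermissionsRequest(
        resource="projects/my-project",                # hypothetical resource path
        permissions=["resourcemanager.projects.get"],  # wildcards are not allowed
    )
    response = stub.TestIamPermissions(request)
    print(list(response.permissions))  # the subset the caller actually holds

If the resource does not exist, the response simply contains no permissions rather than raising NOT_FOUND, so this check is safe to run before attempting other operations.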
diff --git a/third_party/googleapis/google/iam/v1/policy.proto b/third_party/googleapis/google/iam/v1/policy.proto
new file mode 100644
index 0000000000..a09b544313
--- /dev/null
+++ b/third_party/googleapis/google/iam/v1/policy.proto
@@ -0,0 +1,149 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.iam.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option csharp_namespace = "Google.Cloud.Iam.V1";
+option go_package = "google.golang.org/genproto/googleapis/iam/v1;iam";
+option java_multiple_files = true;
+option java_outer_classname = "PolicyProto";
+option java_package = "com.google.iam.v1";
+
+
+// Defines an Identity and Access Management (IAM) policy. It is used to
+// specify access control policies for Cloud Platform resources.
+//
+//
+// A `Policy` consists of a list of `bindings`. A `Binding` binds a list of
+// `members` to a `role`, where the members can be user accounts, Google groups,
+// Google domains, and service accounts. A `role` is a named list of permissions
+// defined by IAM.
+//
+// **Example**
+//
+// {
+// "bindings": [
+// {
+// "role": "roles/owner",
+// "members": [
+// "user:mike@example.com",
+// "group:admins@example.com",
+// "domain:google.com",
+// "serviceAccount:my-other-app@appspot.gserviceaccount.com",
+// ]
+// },
+// {
+// "role": "roles/viewer",
+// "members": ["user:sean@example.com"]
+// }
+// ]
+// }
+//
+// For a description of IAM and its features, see the
+// [IAM developer's guide](https://cloud.google.com/iam).
+message Policy {
+ // Version of the `Policy`. The default version is 0.
+ int32 version = 1;
+
+ // Associates a list of `members` to a `role`.
+ // Multiple `bindings` must not be specified for the same `role`.
+ // `bindings` with no members will result in an error.
+ repeated Binding bindings = 4;
+
+ // `etag` is used for optimistic concurrency control as a way to help
+ // prevent simultaneous updates of a policy from overwriting each other.
+ // It is strongly suggested that systems make use of the `etag` in the
+ // read-modify-write cycle to perform policy updates in order to avoid race
+ // conditions: An `etag` is returned in the response to `getIamPolicy`, and
+ // systems are expected to put that etag in the request to `setIamPolicy` to
+ // ensure that their change will be applied to the same version of the policy.
+ //
+ // If no `etag` is provided in the call to `setIamPolicy`, then the existing
+ // policy is overwritten blindly.
+ bytes etag = 3;
+}
+
+// Associates `members` with a `role`.
+message Binding {
+ // Role that is assigned to `members`.
+ // For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+ // Required
+ string role = 1;
+
+ // Specifies the identities requesting access for a Cloud Platform resource.
+ // `members` can have the following values:
+ //
+ // * `allUsers`: A special identifier that represents anyone who is
+ // on the internet, with or without a Google account.
+ //
+ // * `allAuthenticatedUsers`: A special identifier that represents anyone
+ // who is authenticated with a Google account or a service account.
+ //
+ // * `user:{emailid}`: An email address that represents a specific Google
+ // account. For example, `alice@gmail.com` or `joe@example.com`.
+ //
+ //
+ // * `serviceAccount:{emailid}`: An email address that represents a service
+ // account. For example, `my-other-app@appspot.gserviceaccount.com`.
+ //
+ // * `group:{emailid}`: An email address that represents a Google group.
+ // For example, `admins@example.com`.
+ //
+ // * `domain:{domain}`: A Google Apps domain name that represents all the
+ // users of that domain. For example, `google.com` or `example.com`.
+ //
+ //
+ repeated string members = 2;
+}
+
+// The difference between two policies.
+message PolicyDelta {
+ // The delta for Bindings between two policies.
+ repeated BindingDelta binding_deltas = 1;
+}
+
+// One delta entry for a Binding. Each individual change to a binding (with
+// exactly one member per entry) is recorded as a separate entry.
+message BindingDelta {
+ // The type of action performed on a Binding in a policy.
+ enum Action {
+ // Unspecified.
+ ACTION_UNSPECIFIED = 0;
+
+ // Addition of a Binding.
+ ADD = 1;
+
+ // Removal of a Binding.
+ REMOVE = 2;
+ }
+
+ // The action that was performed on a Binding.
+ // Required
+ Action action = 1;
+
+ // Role that is assigned to `members`.
+ // For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+ // Required
+ string role = 2;
+
+ // A single identity requesting access for a Cloud Platform resource.
+ // Follows the same format as Binding.members.
+ // Required
+ string member = 3;
+}
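The Policy and Binding messages map directly onto generated classes. A minimal sketch that rebuilds the JSON example from the comment above and notes the etag handling described for `setIamPolicy` (the compiled module name is an assumption):

    from google.iam.v1 import policy_pb2

    policy = policy_pb2.Policy(
        bindings=[
            policy_pb2.Binding(
                role="roles/owner",
                members=[
                    "user:mike@example.com",
                    "group:admins@example.com",
                    "domain:google.com",
                    "serviceAccount:my-other-app@appspot.gserviceaccount.com",
                ],
            ),
            policy_pb2.Binding(role="roles/viewer",
                               members=["user:sean@example.com"]),
        ],
    )
    # When updating an existing policy, carry over the etag returned by
    # getIamPolicy before calling setIamPolicy, so concurrent edits fail
    # cleanly instead of silently overwriting each other.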
diff --git a/third_party/googleapis/google/logging/README.md b/third_party/googleapis/google/logging/README.md
new file mode 100644
index 0000000000..f1989ac628
--- /dev/null
+++ b/third_party/googleapis/google/logging/README.md
@@ -0,0 +1,3 @@
+# Introduction
+
+The Stackdriver Logging service. \ No newline at end of file
diff --git a/third_party/googleapis/google/logging/logging.yaml b/third_party/googleapis/google/logging/logging.yaml
new file mode 100644
index 0000000000..66e2088149
--- /dev/null
+++ b/third_party/googleapis/google/logging/logging.yaml
@@ -0,0 +1,80 @@
+# Stackdriver Logging API service configuration
+
+type: google.api.Service
+config_version: 2
+name: logging.googleapis.com
+title: Stackdriver Logging API
+
+apis:
+- name: google.logging.v2.LoggingServiceV2
+- name: google.logging.v2.ConfigServiceV2
+- name: google.logging.v2.MetricsServiceV2
+
+documentation:
+ summary:
+ The Stackdriver Logging API lets you write log
+ entries and manage your logs, log sinks and logs-based metrics.
+ overview: |
+ (== suppress_warning versioning-http-version-prefix ==)
+
+# Auth section
+authentication:
+ rules:
+ - selector: |-
+ google.logging.v1.LoggingService.DeleteLog,
+ google.logging.v1.ConfigService.CreateLogServiceSink,
+ google.logging.v1.ConfigService.CreateLogSink,
+ google.logging.v1.ConfigService.CreateSink,
+ google.logging.v1.ConfigService.DeleteLogServiceSink,
+ google.logging.v1.ConfigService.DeleteLogSink,
+ google.logging.v1.ConfigService.DeleteSink,
+ google.logging.v1.ConfigService.UpdateLogServiceSink,
+ google.logging.v1.ConfigService.UpdateLogSink,
+ google.logging.v1.ConfigService.UpdateSink,
+ google.logging.v2.LoggingServiceV2.DeleteLog,
+ google.logging.v2.ConfigServiceV2.CreateSink,
+ google.logging.v2.ConfigServiceV2.DeleteSink,
+ google.logging.v2.ConfigServiceV2.UpdateSink,
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/logging.admin,
+ https://www.googleapis.com/auth/cloud-platform
+ - selector: |-
+ google.logging.v1.LoggingService.ListLogEntries,
+ google.logging.v1.LoggingService.ListLogServiceIndexes,
+ google.logging.v1.LoggingService.ListLogServices,
+ google.logging.v1.LoggingService.ListLogs,
+ google.logging.v1.ConfigService.GetLogServiceSink,
+ google.logging.v1.ConfigService.GetLogSink,
+ google.logging.v1.ConfigService.GetSink,
+ google.logging.v1.ConfigService.ListLogServiceSinks,
+ google.logging.v1.ConfigService.ListLogSinks,
+ google.logging.v1.ConfigService.ListSinks,
+ google.logging.v1.MetricsService.GetLogMetric,
+ google.logging.v1.MetricsService.ListLogMetrics,
+ google.logging.v2.LoggingServiceV2.ListLogEntries,
+ google.logging.v2.LoggingServiceV2.ListMonitoredResourceDescriptors,
+ google.logging.v2.ConfigServiceV2.GetSink,
+ google.logging.v2.ConfigServiceV2.ListSinks,
+ google.logging.v2.MetricsServiceV2.GetLogMetric,
+ google.logging.v2.MetricsServiceV2.ListLogMetrics
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/logging.read,
+ https://www.googleapis.com/auth/logging.admin,
+ https://www.googleapis.com/auth/cloud-platform.read-only,
+ https://www.googleapis.com/auth/cloud-platform
+ - selector: |-
+ google.logging.v1.LoggingService.WriteLogEntries,
+ google.logging.v1.MetricsService.CreateLogMetric,
+ google.logging.v1.MetricsService.DeleteLogMetric,
+ google.logging.v1.MetricsService.UpdateLogMetric,
+ google.logging.v2.LoggingServiceV2.WriteLogEntries,
+ google.logging.v2.MetricsServiceV2.CreateLogMetric,
+ google.logging.v2.MetricsServiceV2.DeleteLogMetric,
+ google.logging.v2.MetricsServiceV2.UpdateLogMetric
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/logging.write,
+ https://www.googleapis.com/auth/logging.admin,
+ https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/logging/type/http_request.proto b/third_party/googleapis/google/logging/type/http_request.proto
new file mode 100644
index 0000000000..193866ddf6
--- /dev/null
+++ b/third_party/googleapis/google/logging/type/http_request.proto
@@ -0,0 +1,88 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.type;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+
+option csharp_namespace = "Google.Cloud.Logging.Type";
+option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype";
+option java_multiple_files = true;
+option java_outer_classname = "HttpRequestProto";
+option java_package = "com.google.logging.type";
+
+
+// A common proto for logging HTTP requests. Only contains semantics
+// defined by the HTTP specification. Product-specific logging
+// information MUST be defined in a separate message.
+message HttpRequest {
+ // The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`.
+ string request_method = 1;
+
+ // The scheme (http, https), the host name, the path and the query
+ // portion of the URL that was requested.
+ // Example: `"http://example.com/some/info?color=red"`.
+ string request_url = 2;
+
+ // The size of the HTTP request message in bytes, including the request
+ // headers and the request body.
+ int64 request_size = 3;
+
+ // The response code indicating the status of the response.
+ // Examples: 200, 404.
+ int32 status = 4;
+
+ // The size of the HTTP response message sent back to the client, in bytes,
+ // including the response headers and the response body.
+ int64 response_size = 5;
+
+ // The user agent sent by the client. Example:
+ // `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)"`.
+ string user_agent = 6;
+
+ // The IP address (IPv4 or IPv6) of the client that issued the HTTP
+ // request. Examples: `"192.168.1.1"`, `"FE80::0202:B3FF:FE1E:8329"`.
+ string remote_ip = 7;
+
+ // The IP address (IPv4 or IPv6) of the origin server that the request was
+ // sent to.
+ string server_ip = 13;
+
+ // The referer URL of the request, as defined in
+ // [HTTP/1.1 Header Field Definitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
+ string referer = 8;
+
+ // The request processing latency on the server, from the time the request was
+ // received until the response was sent.
+ google.protobuf.Duration latency = 14;
+
+ // Whether or not a cache lookup was attempted.
+ bool cache_lookup = 11;
+
+ // Whether or not an entity was served from cache
+ // (with or without validation).
+ bool cache_hit = 9;
+
+ // Whether or not the response was validated with the origin server before
+ // being served from cache. This field is only meaningful if `cache_hit` is
+ // True.
+ bool cache_validated_with_origin_server = 10;
+
+ // The number of HTTP response bytes inserted into cache. Set only when a
+ // cache fill was attempted.
+ int64 cache_fill_bytes = 12;
+}
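Filling in this message is plain protobuf work; the only well-known type involved is the Duration used for latency. A minimal sketch (the compiled module name is an assumption and the field values are illustrative):

    from google.logging.type import http_request_pb2

    req_log = http_request_pb2.HttpRequest(
        request_method="GET",
        request_url="http://example.com/some/info?color=red",
        status=200,
        response_size=5120,
        remote_ip="192.168.1.1",
        cache_hit=False,
    )
    # latency is a google.protobuf.Duration; the helper converts 120 ms
    # into the seconds/nanos representation.
    req_log.latency.FromMilliseconds(120)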
diff --git a/third_party/googleapis/google/logging/type/log_severity.proto b/third_party/googleapis/google/logging/type/log_severity.proto
new file mode 100644
index 0000000000..acb924aa76
--- /dev/null
+++ b/third_party/googleapis/google/logging/type/log_severity.proto
@@ -0,0 +1,71 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.type;
+
+import "google/api/annotations.proto";
+
+option csharp_namespace = "Google.Cloud.Logging.Type";
+option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype";
+option java_multiple_files = true;
+option java_outer_classname = "LogSeverityProto";
+option java_package = "com.google.logging.type";
+
+
+// The severity of the event described in a log entry, expressed as one of the
+// standard severity levels listed below. For your reference, the levels are
+// assigned the listed numeric values. The effect of using numeric values other
+// than those listed is undefined.
+//
+// You can filter for log entries by severity. For example, the following
+// filter expression will match log entries with severities `INFO`, `NOTICE`,
+// and `WARNING`:
+//
+// severity > DEBUG AND severity <= WARNING
+//
+// If you are writing log entries, you should map other severity encodings to
+// one of these standard levels. For example, you might map all of Java's FINE,
+// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the
+// original severity level in the log entry payload if you wish.
+enum LogSeverity {
+ // (0) The log entry has no assigned severity level.
+ DEFAULT = 0;
+
+ // (100) Debug or trace information.
+ DEBUG = 100;
+
+ // (200) Routine information, such as ongoing status or performance.
+ INFO = 200;
+
+ // (300) Normal but significant events, such as start up, shut down, or
+ // a configuration change.
+ NOTICE = 300;
+
+ // (400) Warning events might cause problems.
+ WARNING = 400;
+
+ // (500) Error events are likely to cause problems.
+ ERROR = 500;
+
+ // (600) Critical events cause more severe problems or outages.
+ CRITICAL = 600;
+
+ // (700) A person must take an action immediately.
+ ALERT = 700;
+
+ // (800) One or more systems are unusable.
+ EMERGENCY = 800;
+}
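The comment above recommends mapping other severity encodings onto these standard levels. A minimal sketch that maps Python's stdlib logging levels onto the documented numeric values (the particular mapping choices are illustrative, not prescribed by this file):

    import logging

    # Documented LogSeverity numeric values: DEFAULT=0, DEBUG=100, INFO=200,
    # WARNING=400, ERROR=500, CRITICAL=600.
    PYTHON_TO_LOG_SEVERITY = {
        logging.DEBUG: 100,
        logging.INFO: 200,
        logging.WARNING: 400,
        logging.ERROR: 500,
        logging.CRITICAL: 600,
    }

    def to_log_severity(python_level: int) -> int:
        # Unknown or custom levels fall back to DEFAULT (0).
        return PYTHON_TO_LOG_SEVERITY.get(python_level, 0)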
diff --git a/third_party/googleapis/google/logging/v2/log_entry.proto b/third_party/googleapis/google/logging/v2/log_entry.proto
new file mode 100644
index 0000000000..a767a9983f
--- /dev/null
+++ b/third_party/googleapis/google/logging/v2/log_entry.proto
@@ -0,0 +1,157 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google/api/annotations.proto";
+import "google/api/monitored_resource.proto";
+import "google/logging/type/http_request.proto";
+import "google/logging/type/log_severity.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option csharp_namespace = "Google.Cloud.Logging.V2";
+option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging";
+option java_multiple_files = true;
+option java_outer_classname = "LogEntryProto";
+option java_package = "com.google.logging.v2";
+
+
+// An individual entry in a log.
+message LogEntry {
+ // Required. The resource name of the log to which this log entry belongs:
+ //
+ // "projects/[PROJECT_ID]/logs/[LOG_ID]"
+ // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
+ // "folders/[FOLDER_ID]/logs/[LOG_ID]"
+ //
+ // `[LOG_ID]` must be URL-encoded within `log_name`. Example:
+ // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`.
+ // `[LOG_ID]` must be less than 512 characters long and can only include the
+ // following characters: upper and lower case alphanumeric characters,
+ // forward-slash, underscore, hyphen, and period.
+ //
+ // For backward compatibility, if `log_name` begins with a forward-slash, such
+ // as `/projects/...`, then the log entry is ingested as usual but the
+ // forward-slash is removed. Listing the log entry will not show the leading
+ // slash and filtering for a log name with a leading slash will never return
+ // any results.
+ string log_name = 12;
+
+ // Required. The monitored resource associated with this log entry.
+ // Example: a log entry that reports a database error would be
+ // associated with the monitored resource designating the particular
+ // database that reported the error.
+ google.api.MonitoredResource resource = 8;
+
+ // Optional. The log entry payload, which can be one of multiple types.
+ oneof payload {
+ // The log entry payload, represented as a protocol buffer. Some
+ // Google Cloud Platform services use this field for their log
+ // entry payloads.
+ google.protobuf.Any proto_payload = 2;
+
+ // The log entry payload, represented as a Unicode string (UTF-8).
+ string text_payload = 3;
+
+ // The log entry payload, represented as a structure that is
+ // expressed as a JSON object.
+ google.protobuf.Struct json_payload = 6;
+ }
+
+ // Optional. The time the event described by the log entry occurred. If
+ // omitted in a new log entry, Stackdriver Logging will insert the time the
+ // log entry is received. Stackdriver Logging might reject log entries whose
+ // time stamps are more than a couple of hours in the future. Log entries
+ // with time stamps in the past are accepted.
+ google.protobuf.Timestamp timestamp = 9;
+
+ // Optional. The severity of the log entry. The default value is
+ // `LogSeverity.DEFAULT`.
+ google.logging.type.LogSeverity severity = 10;
+
+ // Optional. A unique identifier for the log entry. If you provide a value,
+ // then Stackdriver Logging considers other log entries in the same project,
+ // with the same `timestamp`, and with the same `insert_id` to be duplicates
+ // which can be removed. If omitted in new log entries, then Stackdriver
+ // Logging will insert its own unique identifier. The `insert_id` is used
+ // to order log entries that have the same `timestamp` value.
+ string insert_id = 4;
+
+ // Optional. Information about the HTTP request associated with this
+ // log entry, if applicable.
+ google.logging.type.HttpRequest http_request = 7;
+
+ // Optional. A set of user-defined (key, value) data that provides additional
+ // information about the log entry.
+ map<string, string> labels = 11;
+
+ // Optional. Information about an operation associated with the log entry, if
+ // applicable.
+ LogEntryOperation operation = 15;
+
+ // Optional. Resource name of the trace associated with the log entry, if any.
+ // If it contains a relative resource name, the name is assumed to be relative
+ // to `//tracing.googleapis.com`. Example:
+ // `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824`
+ string trace = 22;
+
+ // Optional. Source code location information associated with the log entry,
+ // if any.
+ LogEntrySourceLocation source_location = 23;
+}
+
+// Additional information about a potentially long-running operation with which
+// a log entry is associated.
+message LogEntryOperation {
+ // Optional. An arbitrary operation identifier. Log entries with the
+ // same identifier are assumed to be part of the same operation.
+ string id = 1;
+
+ // Optional. An arbitrary producer identifier. The combination of
+ // `id` and `producer` must be globally unique. Examples for `producer`:
+ // `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`.
+ string producer = 2;
+
+ // Optional. Set this to True if this is the first log entry in the operation.
+ bool first = 3;
+
+ // Optional. Set this to True if this is the last log entry in the operation.
+ bool last = 4;
+}
+
+// Additional information about the source code location that produced the log
+// entry.
+message LogEntrySourceLocation {
+ // Optional. Source file name. Depending on the runtime environment, this
+ // might be a simple name or a fully-qualified name.
+ string file = 1;
+
+ // Optional. Line within the source file. 1-based; 0 indicates no line number
+ // available.
+ int64 line = 2;
+
+ // Optional. Human-readable name of the function or method being invoked, with
+ // optional context such as the class or package name. This information may be
+ // used in contexts such as the logs viewer, where a file and line number are
+ // less meaningful. The format can vary by language. For example:
+ // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function`
+ // (Python).
+ string function = 3;
+}
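A minimal sketch of building a LogEntry with a text payload, consistent with the field descriptions above (compiled module names, the project id, and the resource labels are illustrative):

    from google.logging.v2 import log_entry_pb2

    entry = log_entry_pb2.LogEntry(
        log_name="projects/my-project-id/logs/syslog",
        text_payload="service started",
        severity=200,  # LogSeverity.INFO
    )
    entry.resource.type = "gce_instance"            # a monitored resource type
    entry.resource.labels["zone"] = "us-central1-a"
    entry.resource.labels["instance_id"] = "00000000000000000000"
    entry.timestamp.GetCurrentTime()  # otherwise the service fills it in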
diff --git a/third_party/googleapis/google/logging/v2/logging.proto b/third_party/googleapis/google/logging/v2/logging.proto
new file mode 100644
index 0000000000..5403e5421f
--- /dev/null
+++ b/third_party/googleapis/google/logging/v2/logging.proto
@@ -0,0 +1,276 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google/api/annotations.proto";
+import "google/api/monitored_resource.proto";
+import "google/logging/v2/log_entry.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option csharp_namespace = "Google.Cloud.Logging.V2";
+option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging";
+option java_multiple_files = true;
+option java_outer_classname = "LoggingProto";
+option java_package = "com.google.logging.v2";
+
+
+// Service for ingesting and querying logs.
+service LoggingServiceV2 {
+ // Deletes all the log entries in a log.
+ // The log reappears if it receives new entries.
+ // Log entries written shortly before the delete operation might not be
+ // deleted.
+ rpc DeleteLog(DeleteLogRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2beta1/{log_name=projects/*/logs/*}" };
+ }
+
+ // Writes log entries to Stackdriver Logging.
+ rpc WriteLogEntries(WriteLogEntriesRequest) returns (WriteLogEntriesResponse) {
+ option (google.api.http) = { post: "/v2/entries:write" body: "*" };
+ }
+
+ // Lists log entries. Use this method to retrieve log entries from
+ // Stackdriver Logging. For ways to export log entries, see
+ // [Exporting Logs](/logging/docs/export).
+ rpc ListLogEntries(ListLogEntriesRequest) returns (ListLogEntriesResponse) {
+ option (google.api.http) = { post: "/v2/entries:list" body: "*" };
+ }
+
+ // Lists the descriptors for monitored resource types used by Stackdriver
+ // Logging.
+ rpc ListMonitoredResourceDescriptors(ListMonitoredResourceDescriptorsRequest) returns (ListMonitoredResourceDescriptorsResponse) {
+ option (google.api.http) = { get: "/v2/monitoredResourceDescriptors" };
+ }
+
+ // Lists the logs in projects, organizations, folders, or billing accounts.
+ // Only logs that have entries are listed.
+ rpc ListLogs(ListLogsRequest) returns (ListLogsResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*}/logs" };
+ }
+}
+
+// The parameters to DeleteLog.
+message DeleteLogRequest {
+ // Required. The resource name of the log to delete:
+ //
+ // "projects/[PROJECT_ID]/logs/[LOG_ID]"
+ // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
+ // "folders/[FOLDER_ID]/logs/[LOG_ID]"
+ //
+ // `[LOG_ID]` must be URL-encoded. For example,
+ // `"projects/my-project-id/logs/syslog"`,
+ // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`.
+ // For more information about log names, see
+ // [LogEntry][google.logging.v2.LogEntry].
+ string log_name = 1;
+}
+
+// The parameters to WriteLogEntries.
+message WriteLogEntriesRequest {
+ // Optional. A default log resource name that is assigned to all log entries
+ // in `entries` that do not specify a value for `log_name`:
+ //
+ // "projects/[PROJECT_ID]/logs/[LOG_ID]"
+ // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
+ // "folders/[FOLDER_ID]/logs/[LOG_ID]"
+ //
+ // `[LOG_ID]` must be URL-encoded. For example,
+ // `"projects/my-project-id/logs/syslog"` or
+ // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`.
+ // For more information about log names, see
+ // [LogEntry][google.logging.v2.LogEntry].
+ string log_name = 1;
+
+ // Optional. A default monitored resource object that is assigned to all log
+ // entries in `entries` that do not specify a value for `resource`. Example:
+ //
+ // { "type": "gce_instance",
+ // "labels": {
+ // "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
+ //
+ // See [LogEntry][google.logging.v2.LogEntry].
+ google.api.MonitoredResource resource = 2;
+
+ // Optional. Default labels that are added to the `labels` field of all log
+ // entries in `entries`. If a log entry already has a label with the same key
+ // as a label in this parameter, then the log entry's label is not changed.
+ // See [LogEntry][google.logging.v2.LogEntry].
+ map<string, string> labels = 3;
+
+ // Required. The log entries to write. Values supplied for the fields
+ // `log_name`, `resource`, and `labels` in this `entries.write` request are
+ // inserted into those log entries in this list that do not provide their own
+ // values.
+ //
+ // Stackdriver Logging also creates and inserts values for `timestamp` and
+ // `insert_id` if the entries do not provide them. The created `insert_id` for
+ // the N'th entry in this list will be greater than the IDs of earlier entries
+ // and less than the IDs of later entries. Otherwise, the order of log entries
+ // in this list does not matter.
+ //
+ // To improve throughput and to avoid exceeding the
+ // [quota limit](/logging/quota-policy) for calls to `entries.write`,
+ // you should write multiple log entries at once rather than
+ // calling this method for each individual log entry.
+ repeated LogEntry entries = 4;
+
+ // Optional. Whether valid entries should be written even if some other
+ // entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
+ // entry is not written, then the response status is the error associated
+ // with one of the failed entries and the response includes error details
+ // keyed by the entries' zero-based index in the `entries.write` method.
+ bool partial_success = 5;
+}
+
+// Result returned from WriteLogEntries. This message is intentionally empty.
+message WriteLogEntriesResponse {
+}
+
+// The parameters to `ListLogEntries`.
+message ListLogEntriesRequest {
+ // Deprecated. Use `resource_names` instead. One or more project identifiers
+ // or project numbers from which to retrieve log entries. Example:
+ // `"my-project-1A"`. If present, these project identifiers are converted to
+ // resource name format and added to the list of resources in
+ // `resource_names`.
+ repeated string project_ids = 1;
+
+ // Required. Names of one or more parent resources from which to
+ // retrieve log entries:
+ //
+ // "projects/[PROJECT_ID]"
+ // "organizations/[ORGANIZATION_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]"
+ // "folders/[FOLDER_ID]"
+ //
+ // Projects listed in the `project_ids` field are added to this list.
+ repeated string resource_names = 8;
+
+ // Optional. A filter that chooses which log entries to return. See [Advanced
+ // Logs Filters](/logging/docs/view/advanced_filters). Only log entries that
+ // match the filter are returned. An empty filter matches all log entries in
+ // the resources listed in `resource_names`. Referencing a parent resource
+ // that is not listed in `resource_names` will cause the filter to return no
+ // results.
+ // The maximum length of the filter is 20000 characters.
+ string filter = 2;
+
+ // Optional. How the results should be sorted. Presently, the only permitted
+ // values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
+ // option returns entries in order of increasing values of
+ // `LogEntry.timestamp` (oldest first), and the second option returns entries
+ // in order of decreasing timestamps (newest first). Entries with equal
+ // timestamps are returned in order of their `insert_id` values.
+ string order_by = 3;
+
+ // Optional. The maximum number of results to return from this request.
+ // Non-positive values are ignored. The presence of `next_page_token` in the
+ // response indicates that more results might be available.
+ int32 page_size = 4;
+
+ // Optional. If present, then retrieve the next batch of results from the
+ // preceding call to this method. `page_token` must be the value of
+ // `next_page_token` from the previous response. The values of other method
+ // parameters should be identical to those in the previous call.
+ string page_token = 5;
+}
+
+// Result returned from `ListLogEntries`.
+message ListLogEntriesResponse {
+ // A list of log entries.
+ repeated LogEntry entries = 1;
+
+ // If there might be more results than those appearing in this response, then
+ // `nextPageToken` is included. To get the next set of results, call this
+ // method again using the value of `nextPageToken` as `pageToken`.
+ //
+ // If a value for `next_page_token` appears and the `entries` field is empty,
+ // it means that the search found no log entries so far but it did not have
+ // time to search all the possible log entries. Retry the method with this
+ // value for `page_token` to continue the search. Alternatively, consider
+ // speeding up the search by changing your filter to specify a single log name
+ // or resource type, or to narrow the time range of the search.
+ string next_page_token = 2;
+}
+
+// The parameters to ListMonitoredResourceDescriptors
+message ListMonitoredResourceDescriptorsRequest {
+ // Optional. The maximum number of results to return from this request.
+ // Non-positive values are ignored. The presence of `nextPageToken` in the
+ // response indicates that more results might be available.
+ int32 page_size = 1;
+
+ // Optional. If present, then retrieve the next batch of results from the
+ // preceding call to this method. `pageToken` must be the value of
+ // `nextPageToken` from the previous response. The values of other method
+ // parameters should be identical to those in the previous call.
+ string page_token = 2;
+}
+
+// Result returned from ListMonitoredResourceDescriptors.
+message ListMonitoredResourceDescriptorsResponse {
+ // A list of resource descriptors.
+ repeated google.api.MonitoredResourceDescriptor resource_descriptors = 1;
+
+ // If there might be more results than those appearing in this response, then
+ // `nextPageToken` is included. To get the next set of results, call this
+ // method again using the value of `nextPageToken` as `pageToken`.
+ string next_page_token = 2;
+}
+
+// The parameters to ListLogs.
+message ListLogsRequest {
+ // Required. The resource name that owns the logs:
+ //
+ // "projects/[PROJECT_ID]"
+ // "organizations/[ORGANIZATION_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]"
+ // "folders/[FOLDER_ID]"
+ string parent = 1;
+
+ // Optional. The maximum number of results to return from this request.
+ // Non-positive values are ignored. The presence of `nextPageToken` in the
+ // response indicates that more results might be available.
+ int32 page_size = 2;
+
+ // Optional. If present, then retrieve the next batch of results from the
+ // preceding call to this method. `pageToken` must be the value of
+ // `nextPageToken` from the previous response. The values of other method
+ // parameters should be identical to those in the previous call.
+ string page_token = 3;
+}
+
+// Result returned from ListLogs.
+message ListLogsResponse {
+ // A list of log names. For example,
+ // `"projects/my-project/syslog"` or
+ // `"organizations/123/cloudresourcemanager.googleapis.com%2Factivity"`.
+ repeated string log_names = 3;
+
+ // If there might be more results than those appearing in this response, then
+ // `nextPageToken` is included. To get the next set of results, call this
+ // method again using the value of `nextPageToken` as `pageToken`.
+ string next_page_token = 2;
+}
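The comments above recommend batching many entries into a single entries.write call instead of writing them one at a time. A minimal sketch of WriteLogEntries through a generated stub (module names follow the usual protoc convention; the channel target and log name are placeholders, and a real call needs an authenticated channel with a logging.write scope):

    import grpc
    from google.logging.v2 import log_entry_pb2, logging_pb2, logging_pb2_grpc

    channel = grpc.insecure_channel("localhost:8080")  # placeholder target
    stub = logging_pb2_grpc.LoggingServiceV2Stub(channel)

    request = logging_pb2.WriteLogEntriesRequest(
        log_name="projects/my-project-id/logs/syslog",
        entries=[
            log_entry_pb2.LogEntry(text_payload="batched entry %d" % i)
            for i in range(10)  # many entries per call, not one call per entry
        ],
        partial_success=True,  # keep valid entries even if some are rejected
    )
    stub.WriteLogEntries(request)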
diff --git a/third_party/googleapis/google/logging/v2/logging_config.proto b/third_party/googleapis/google/logging/v2/logging_config.proto
new file mode 100644
index 0000000000..1739ada7f1
--- /dev/null
+++ b/third_party/googleapis/google/logging/v2/logging_config.proto
@@ -0,0 +1,276 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google/api/annotations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.Logging.V2";
+option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging";
+option java_multiple_files = true;
+option java_outer_classname = "LoggingConfigProto";
+option java_package = "com.google.logging.v2";
+
+
+// Service for configuring sinks used to export log entries outside of
+// Stackdriver Logging.
+service ConfigServiceV2 {
+ // Lists sinks.
+ rpc ListSinks(ListSinksRequest) returns (ListSinksResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*}/sinks" };
+ }
+
+ // Gets a sink.
+ rpc GetSink(GetSinkRequest) returns (LogSink) {
+ option (google.api.http) = { get: "/v2/{sink_name=projects/*/sinks/*}" };
+ }
+
+ // Creates a sink that exports specified log entries to a destination. The
+ // export of newly-ingested log entries begins immediately, unless the current
+ // time is outside the sink's start and end times or the sink's
+ // `writer_identity` is not permitted to write to the destination. A sink can
+ // export log entries only from the resource owning the sink.
+ rpc CreateSink(CreateSinkRequest) returns (LogSink) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*}/sinks" body: "sink" };
+ }
+
+ // Updates a sink. If the named sink doesn't exist, then this method is
+ // identical to
+ // [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create).
+ // If the named sink does exist, then this method replaces the following
+ // fields in the existing sink with values from the new sink: `destination`,
+ // `filter`, `output_version_format`, `start_time`, and `end_time`.
+ // The updated sink might also have a new `writer_identity`; see the
+ // `unique_writer_identity` field.
+ rpc UpdateSink(UpdateSinkRequest) returns (LogSink) {
+ option (google.api.http) = { put: "/v2/{sink_name=projects/*/sinks/*}" body: "sink" };
+ }
+
+ // Deletes a sink. If the sink has a unique `writer_identity`, then that
+ // service account is also deleted.
+ rpc DeleteSink(DeleteSinkRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{sink_name=projects/*/sinks/*}" };
+ }
+}
+
+// Describes a sink used to export log entries to one of the following
+// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a
+// Cloud Pub/Sub topic. A logs filter controls which log entries are
+// exported. The sink must be created within a project, organization, billing
+// account, or folder.
+message LogSink {
+ // Available log entry formats. Log entries can be written to Stackdriver
+ // Logging in either format and can be exported in either format.
+ // Version 2 is the preferred format.
+ enum VersionFormat {
+ // An unspecified format version that will default to V2.
+ VERSION_FORMAT_UNSPECIFIED = 0;
+
+ // `LogEntry` version 2 format.
+ V2 = 1;
+
+ // `LogEntry` version 1 format.
+ V1 = 2;
+ }
+
+ // Required. The client-assigned sink identifier, unique within the
+ // project. Example: `"my-syslog-errors-to-pubsub"`. Sink identifiers are
+ // limited to 100 characters and can include only the following characters:
+ // upper and lower-case alphanumeric characters, underscores, hyphens, and
+ // periods.
+ string name = 1;
+
+ // Required. The export destination:
+ //
+ // "storage.googleapis.com/[GCS_BUCKET]"
+ // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]"
+ // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]"
+ //
+ // The sink's `writer_identity`, set when the sink is created, must
+ // have permission to write to the destination or else the log
+ // entries are not exported. For more information, see
+ // [Exporting Logs With Sinks](/logging/docs/api/tasks/exporting-logs).
+ string destination = 3;
+
+ // Optional.
+ // An [advanced logs filter](/logging/docs/view/advanced_filters). The only
+ // exported log entries are those that are in the resource owning the sink and
+ // that match the filter. The filter must use the log entry format specified
+ // by the `output_version_format` parameter. For example, in the v2 format:
+ //
+ // logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR
+ string filter = 5;
+
+ // Optional. The log entry format to use for this sink's exported log
+ // entries. The v2 format is used by default.
+ // **The v1 format is deprecated** and should be used only as part of a
+ // migration effort to v2.
+ // See [Migration to the v2 API](/logging/docs/api/v2/migration-to-v2).
+ VersionFormat output_version_format = 6;
+
+ // Output only. An IAM identity&mdash;a service account or group&mdash;under
+ // which Stackdriver Logging writes the exported log entries to the sink's
+ // destination. This field is set by
+ // [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create)
+ // and
+ // [sinks.update](/logging/docs/api/reference/rest/v2/projects.sinks/update),
+ // based on the setting of `unique_writer_identity` in those methods.
+ //
+ // Until you grant this identity write-access to the destination, log entry
+ // exports from this sink will fail. For more information,
+ // see [Granting access for a
+ // resource](/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource).
+ // Consult the destination service's documentation to determine the
+ // appropriate IAM roles to assign to the identity.
+ string writer_identity = 8;
+
+ // Optional. The time at which this sink will begin exporting log entries.
+ // Log entries are exported only if their timestamp is not earlier than the
+ // start time. The default value of this field is the time the sink is
+ // created or updated.
+ google.protobuf.Timestamp start_time = 10;
+
+ // Optional. The time at which this sink will stop exporting log entries. Log
+ // entries are exported only if their timestamp is earlier than the end time.
+ // If this field is not supplied, there is no end time. If both a start time
+ // and an end time are provided, then the end time must be later than the
+ // start time.
+ google.protobuf.Timestamp end_time = 11;
+}
+
+// The parameters to `ListSinks`.
+message ListSinksRequest {
+ // Required. The parent resource whose sinks are to be listed:
+ //
+ // "projects/[PROJECT_ID]"
+ // "organizations/[ORGANIZATION_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]"
+ // "folders/[FOLDER_ID]"
+ string parent = 1;
+
+ // Optional. If present, then retrieve the next batch of results from the
+ // preceding call to this method. `pageToken` must be the value of
+ // `nextPageToken` from the previous response. The values of other method
+ // parameters should be identical to those in the previous call.
+ string page_token = 2;
+
+ // Optional. The maximum number of results to return from this request.
+ // Non-positive values are ignored. The presence of `nextPageToken` in the
+ // response indicates that more results might be available.
+ int32 page_size = 3;
+}
+
+// Result returned from `ListSinks`.
+message ListSinksResponse {
+ // A list of sinks.
+ repeated LogSink sinks = 1;
+
+ // If there might be more results than appear in this response, then
+ // `nextPageToken` is included. To get the next set of results, call the same
+ // method again using the value of `nextPageToken` as `pageToken`.
+ string next_page_token = 2;
+}
+
+// The parameters to `GetSink`.
+message GetSinkRequest {
+ // Required. The resource name of the sink:
+ //
+ // "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+ // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+ // "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+ //
+ // Example: `"projects/my-project-id/sinks/my-sink-id"`.
+ string sink_name = 1;
+}
+
+// The parameters to `CreateSink`.
+message CreateSinkRequest {
+ // Required. The resource in which to create the sink:
+ //
+ // "projects/[PROJECT_ID]"
+ // "organizations/[ORGANIZATION_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]"
+ // "folders/[FOLDER_ID]"
+ //
+ // Examples: `"projects/my-logging-project"`, `"organizations/123456789"`.
+ string parent = 1;
+
+ // Required. The new sink, whose `name` parameter is a sink identifier that
+ // is not already in use.
+ LogSink sink = 2;
+
+ // Optional. Determines the kind of IAM identity returned as `writer_identity`
+ // in the new sink. If this value is omitted or set to false, and if the
+ // sink's parent is a project, then the value returned as `writer_identity` is
+ // the same group or service account used by Stackdriver Logging before the
+ // addition of writer identities to this API. The sink's destination must be
+ // in the same project as the sink itself.
+ //
+ // If this field is set to true, or if the sink is owned by a non-project
+ // resource such as an organization, then the value of `writer_identity` will
+ // be a unique service account used only for exports from the new sink. For
+ // more information, see `writer_identity` in [LogSink][google.logging.v2.LogSink].
+ bool unique_writer_identity = 3;
+}
+
+// The parameters to `UpdateSink`.
+message UpdateSinkRequest {
+ // Required. The full resource name of the sink to update, including the
+ // parent resource and the sink identifier:
+ //
+ // "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+ // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+ // "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+ //
+ // Example: `"projects/my-project-id/sinks/my-sink-id"`.
+ string sink_name = 1;
+
+ // Required. The updated sink, whose name is the same identifier that appears
+ // as part of `sink_name`. If `sink_name` does not exist, then
+ // this method creates a new sink.
+ LogSink sink = 2;
+
+ // Optional. See
+ // [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create)
+ // for a description of this field. When updating a sink, the effect of this
+ // field on the value of `writer_identity` in the updated sink depends on both
+ // the old and new values of this field:
+ //
+ // + If the old and new values of this field are both false or both true,
+ // then there is no change to the sink's `writer_identity`.
+ // + If the old value is false and the new value is true, then
+ // `writer_identity` is changed to a unique service account.
+ // + It is an error if the old value is true and the new value is false.
+ bool unique_writer_identity = 3;
+}
+
+// The parameters to `DeleteSink`.
+message DeleteSinkRequest {
+ // Required. The full resource name of the sink to delete, including the
+ // parent resource and the sink identifier:
+ //
+ // "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+ // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+ // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+ // "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+ //
+ // Example: `"projects/my-project-id/sinks/my-sink-id"`.
+ string sink_name = 1;
+}
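A minimal sketch of CreateSink against the service above, exporting matching entries to a Cloud Storage bucket (module names follow the usual protoc convention; the channel target, parent, bucket, and filter are illustrative):

    import grpc
    from google.logging.v2 import logging_config_pb2, logging_config_pb2_grpc

    channel = grpc.insecure_channel("localhost:8080")  # placeholder target
    stub = logging_config_pb2_grpc.ConfigServiceV2Stub(channel)

    sink = logging_config_pb2.LogSink(
        name="my-syslog-errors-to-gcs",
        destination="storage.googleapis.com/my-example-bucket",
        filter='logName="projects/my-project-id/logs/syslog" AND severity>=ERROR',
    )
    request = logging_config_pb2.CreateSinkRequest(
        parent="projects/my-project-id",
        sink=sink,
        unique_writer_identity=True,  # ask for a dedicated writer service account
    )
    created = stub.CreateSink(request)
    # Grant created.writer_identity write access to the bucket, or exports fail.
    print(created.writer_identity)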
diff --git a/third_party/googleapis/google/logging/v2/logging_gapic.yaml b/third_party/googleapis/google/logging/v2/logging_gapic.yaml
new file mode 100644
index 0000000000..14cd8d6a7c
--- /dev/null
+++ b/third_party/googleapis/google/logging/v2/logging_gapic.yaml
@@ -0,0 +1,472 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.logging.spi.v2
+ interface_names:
+ google.logging.v2.ConfigServiceV2: Config
+ google.logging.v2.LoggingServiceV2: Logging
+ google.logging.v2.MetricsServiceV2: Metrics
+ python:
+ package_name: google.cloud.gapic.logging.v2
+ go:
+ package_name: cloud.google.com/go/logging/apiv2
+ domain_layer_location: cloud.google.com/go/logging
+ csharp:
+ package_name: Google.Cloud.Logging.V2
+ ruby:
+ package_name: Google::Cloud::Logging::V2
+ php:
+ package_name: Google\Cloud\Logging\V2
+ nodejs:
+ package_name: logging.v2
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+collections:
+- name_pattern: projects/{project}
+ entity_name: project
+- name_pattern: projects/{project}/logs/{log}
+ entity_name: log
+- name_pattern: projects/{project}/sinks/{sink}
+ entity_name: sink
+- name_pattern: projects/{project}/metrics/{metric}
+ entity_name: metric
+- name_pattern: organizations/{organization}
+ entity_name: organization
+- name_pattern: organizations/{organization}/logs/{log}
+ entity_name: organization_log
+- name_pattern: organizations/{organization}/sinks/{sink}
+ entity_name: organization_sink
+- name_pattern: folders/{folder}
+ entity_name: folder
+- name_pattern: folders/{folder}/logs/{log}
+ entity_name: folder_log
+- name_pattern: folders/{folder}/sinks/{sink}
+ entity_name: folder_sink
+- name_pattern: billingAccounts/{billing_account}
+ entity_name: billing
+- name_pattern: billingAccounts/{billing_account}/logs/{log}
+ entity_name: billing_log
+- name_pattern: billingAccounts/{billing_account}/sinks/{sink}
+ entity_name: billing_sink
+collection_oneofs:
+- oneof_name: parent_oneof
+ collection_names:
+ - project
+ - organization
+ - folder
+ - billing
+- oneof_name: sink_oneof
+ collection_names:
+ - sink
+ - organization_sink
+ - folder_sink
+ - billing_sink
+- oneof_name: log_oneof
+ collection_names:
+ - log
+ - organization_log
+ - folder_log
+ - billing_log
+- oneof_name: metric_oneof
+ collection_names:
+ - metric
+interfaces:
+- name: google.logging.v2.ConfigServiceV2
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/sinks/{sink}
+ entity_name: sink
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.2
+ max_retry_delay_millis: 1000
+ initial_rpc_timeout_millis: 2000
+ rpc_timeout_multiplier: 1.5
+ max_rpc_timeout_millis: 30000
+ total_timeout_millis: 45000
+ methods:
+ - name: ListSinks
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ request_object_method: true
+ page_streaming:
+ request:
+ token_field: page_token
+ page_size_field: page_size
+ response:
+ token_field: next_page_token
+ resources_field: sinks
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ parent: parent_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: GetSink
+ flattening:
+ groups:
+ - parameters:
+ - sink_name
+ required_fields:
+ - sink_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ sink_name: sink_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: CreateSink
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - sink
+ required_fields:
+ - parent
+ - sink
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ parent: parent_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: UpdateSink
+ flattening:
+ groups:
+ - parameters:
+ - sink_name
+ - sink
+ required_fields:
+ - sink_name
+ - sink
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ sink_name: sink_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: DeleteSink
+ flattening:
+ groups:
+ - parameters:
+ - sink_name
+ required_fields:
+ - sink_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ sink_name: sink_oneof
+ resource_name_treatment: STATIC_TYPES
+- name: google.logging.v2.MetricsServiceV2
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/metrics/{metric}
+ entity_name: metric
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.2
+ max_retry_delay_millis: 1000
+ initial_rpc_timeout_millis: 2000
+ rpc_timeout_multiplier: 1.5
+ max_rpc_timeout_millis: 30000
+ total_timeout_millis: 45000
+ methods:
+ - name: ListLogMetrics
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ request_object_method: true
+ page_streaming:
+ request:
+ token_field: page_token
+ page_size_field: page_size
+ response:
+ token_field: next_page_token
+ resources_field: metrics
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ parent: parent_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: GetLogMetric
+ flattening:
+ groups:
+ - parameters:
+ - metric_name
+ required_fields:
+ - metric_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ metric_name: metric_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: CreateLogMetric
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - metric
+ required_fields:
+ - parent
+ - metric
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ parent: parent_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: UpdateLogMetric
+ flattening:
+ groups:
+ - parameters:
+ - metric_name
+ - metric
+ required_fields:
+ - metric_name
+ - metric
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ metric_name: metric_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: DeleteLogMetric
+ flattening:
+ groups:
+ - parameters:
+ - metric_name
+ required_fields:
+ - metric_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ metric_name: metric_oneof
+ resource_name_treatment: STATIC_TYPES
+- name: google.logging.v2.LoggingServiceV2
+ smoke_test:
+ method: WriteLogEntries
+ init_fields:
+ - entries
+ - log_name%project=$PROJECT_ID
+ - log_name%log="test-$RANDOM"
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/logs/{log}
+ entity_name: log
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.2
+ max_retry_delay_millis: 1000
+ initial_rpc_timeout_millis: 2000
+ rpc_timeout_multiplier: 1.5
+ max_rpc_timeout_millis: 30000
+ total_timeout_millis: 45000
+ - name: list
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.2
+ max_retry_delay_millis: 1000
+ initial_rpc_timeout_millis: 7000
+ rpc_timeout_multiplier: 1.5
+ max_rpc_timeout_millis: 30000
+ total_timeout_millis: 45000
+ methods:
+ - name: DeleteLog
+ flattening:
+ groups:
+ - parameters:
+ - log_name
+ required_fields:
+ - log_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ log_name: log_oneof
+ resource_name_treatment: STATIC_TYPES
+ - name: WriteLogEntries
+ flattening:
+ groups:
+ - parameters:
+ - log_name
+ - resource
+ - labels
+ - entries
+ required_fields:
+ - entries
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ field_name_patterns:
+ log_name: log_oneof
+ resource_name_treatment: STATIC_TYPES
+ batching:
+ thresholds:
+ element_count_threshold: 1000
+ request_byte_threshold: 1048576 # 1 MiB
+ delay_threshold_millis: 50
+ flow_control_element_limit: 100000
+ flow_control_byte_limit: 10485760 # 10 MiB
+ flow_control_limit_exceeded_behavior: THROW_EXCEPTION
+ batch_descriptor:
+ batched_field: entries
+ discriminator_fields:
+ - log_name
+ - resource
+ - labels
+ - name: ListLogEntries
+ flattening:
+ groups:
+ - parameters:
+ - resource_names
+ - filter
+ - order_by
+ required_fields:
+ - resource_names
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: entries
+ retry_codes_name: idempotent
+ retry_params_name: list
+ timeout_millis: 30000
+ resource_name_treatment: STATIC_TYPES
+ - name: ListMonitoredResourceDescriptors
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: resource_descriptors
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 30000
+ resource_name_treatment: STATIC_TYPES
+ - name: ListLogs
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: log_names
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: parent_oneof
+ timeout_millis: 30000
+ resource_name_treatment: STATIC_TYPES
+resource_name_generation:
+- message_name: DeleteLogRequest
+ field_entity_map:
+ log_name: log_oneof
+- message_name: WriteLogEntriesRequest
+ field_entity_map:
+ log_name: log_oneof
+- message_name: ListLogEntriesRequest
+ field_entity_map:
+ log_name: log_oneof
+- message_name: LogEntry
+ field_entity_map:
+ log_name: log_oneof
+- message_name: LogSink
+ field_entity_map:
+ destination: "*"
+- message_name: ListSinksRequest
+ field_entity_map:
+ parent: parent_oneof
+- message_name: GetSinkRequest
+ field_entity_map:
+ sink_name: sink_oneof
+- message_name: CreateSinkRequest
+ field_entity_map:
+ parent: parent_oneof
+- message_name: UpdateSinkRequest
+ field_entity_map:
+ sink_name: sink_oneof
+- message_name: DeleteSinkRequest
+ field_entity_map:
+ sink_name: sink_oneof
+- message_name: ListLogMetricsRequest
+ field_entity_map:
+ parent: parent_oneof
+- message_name: GetLogMetricRequest
+ field_entity_map:
+ metric_name: metric_oneof
+- message_name: CreateLogMetricRequest
+ field_entity_map:
+ parent: parent_oneof
+- message_name: UpdateLogMetricRequest
+ field_entity_map:
+ metric_name: metric_oneof
+- message_name: DeleteLogMetricRequest
+ field_entity_map:
+ metric_name: metric_oneof
+- message_name: ListLogsRequest
+ field_entity_map:
+ parent: parent_oneof
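
Editor's note: the retry_params_def entries in the GAPIC config above describe a plain exponential-backoff schedule: the delay before each retry grows by retry_delay_multiplier up to max_retry_delay_millis, the per-RPC timeout grows by rpc_timeout_multiplier up to max_rpc_timeout_millis, and the whole call is abandoned once total_timeout_millis is spent. The Python sketch below is only an illustration of how a generated client might interpret the `default` parameters; real GAPIC runtimes also add jitter and filter on the configured retry codes.

def retry_schedule(initial_delay_ms=100, delay_multiplier=1.2, max_delay_ms=1000,
                   initial_timeout_ms=2000, timeout_multiplier=1.5,
                   max_timeout_ms=30000, total_timeout_ms=45000):
    """Yield (delay_before_attempt_ms, rpc_timeout_ms) pairs until the total budget runs out."""
    elapsed = 0.0
    delay, timeout = initial_delay_ms, initial_timeout_ms
    while elapsed < total_timeout_ms:
        yield delay, min(timeout, total_timeout_ms - elapsed)
        # Pessimistically assume the attempt used its full timeout before failing.
        elapsed += delay + timeout
        delay = min(delay * delay_multiplier, max_delay_ms)
        timeout = min(timeout * timeout_multiplier, max_timeout_ms)

for attempt, (delay_ms, timeout_ms) in enumerate(retry_schedule(), start=1):
    print(f"attempt {attempt}: wait ~{delay_ms:.0f} ms, rpc timeout {timeout_ms:.0f} ms")
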
diff --git a/third_party/googleapis/google/logging/v2/logging_metrics.proto b/third_party/googleapis/google/logging/v2/logging_metrics.proto
new file mode 100644
index 0000000000..22737da9b2
--- /dev/null
+++ b/third_party/googleapis/google/logging/v2/logging_metrics.proto
@@ -0,0 +1,179 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google/api/annotations.proto";
+import "google/api/distribution.proto";
+import "google/api/metric.proto";
+import "google/protobuf/empty.proto";
+
+option csharp_namespace = "Google.Cloud.Logging.V2";
+option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging";
+option java_multiple_files = true;
+option java_outer_classname = "LoggingMetricsProto";
+option java_package = "com.google.logging.v2";
+
+
+// Service for configuring logs-based metrics.
+service MetricsServiceV2 {
+ // Lists logs-based metrics.
+ rpc ListLogMetrics(ListLogMetricsRequest) returns (ListLogMetricsResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*}/metrics" };
+ }
+
+ // Gets a logs-based metric.
+ rpc GetLogMetric(GetLogMetricRequest) returns (LogMetric) {
+ option (google.api.http) = { get: "/v2/{metric_name=projects/*/metrics/*}" };
+ }
+
+ // Creates a logs-based metric.
+ rpc CreateLogMetric(CreateLogMetricRequest) returns (LogMetric) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*}/metrics" body: "metric" };
+ }
+
+ // Creates or updates a logs-based metric.
+ rpc UpdateLogMetric(UpdateLogMetricRequest) returns (LogMetric) {
+ option (google.api.http) = { put: "/v2/{metric_name=projects/*/metrics/*}" body: "metric" };
+ }
+
+ // Deletes a logs-based metric.
+ rpc DeleteLogMetric(DeleteLogMetricRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{metric_name=projects/*/metrics/*}" };
+ }
+}
+
+// Describes a logs-based metric. The value of the metric is the
+// number of log entries that match a logs filter in a given time interval.
+message LogMetric {
+ // Stackdriver Logging API version.
+ enum ApiVersion {
+ // Stackdriver Logging API v2.
+ V2 = 0;
+
+ // Stackdriver Logging API v1.
+ V1 = 1;
+ }
+
+ // Required. The client-assigned metric identifier.
+ // Examples: `"error_count"`, `"nginx/requests"`.
+ //
+ // Metric identifiers are limited to 100 characters and can include
+ // only the following characters: `A-Z`, `a-z`, `0-9`, and the
+ // special characters `_-.,+!*',()%/`. The forward-slash character
+ // (`/`) denotes a hierarchy of name pieces, and it cannot be the
+ // first character of the name.
+ //
+ // The metric identifier in this field must not be
+ // [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding).
+ // However, when the metric identifier appears as the `[METRIC_ID]`
+ // part of a `metric_name` API parameter, then the metric identifier
+ // must be URL-encoded. Example:
+ // `"projects/my-project/metrics/nginx%2Frequests"`.
+ string name = 1;
+
+ // Optional. A description of this metric, which is used in documentation.
+ string description = 2;
+
+ // Required. An [advanced logs filter](/logging/docs/view/advanced_filters)
+ // which is used to match log entries.
+ // Example:
+ //
+ // "resource.type=gae_app AND severity>=ERROR"
+ //
+ // The maximum length of the filter is 20000 characters.
+ string filter = 3;
+
+ // Output only. The API version that created or updated this metric.
+ // The version also dictates the syntax of the filter expression. When a value
+ // for this field is missing, the default value of V2 should be assumed.
+ ApiVersion version = 4;
+}
+
+// The parameters to ListLogMetrics.
+message ListLogMetricsRequest {
+ // Required. The name of the project containing the metrics:
+ //
+ // "projects/[PROJECT_ID]"
+ string parent = 1;
+
+ // Optional. If present, then retrieve the next batch of results from the
+ // preceding call to this method. `pageToken` must be the value of
+ // `nextPageToken` from the previous response. The values of other method
+ // parameters should be identical to those in the previous call.
+ string page_token = 2;
+
+ // Optional. The maximum number of results to return from this request.
+ // Non-positive values are ignored. The presence of `nextPageToken` in the
+ // response indicates that more results might be available.
+ int32 page_size = 3;
+}
+
+// Result returned from ListLogMetrics.
+message ListLogMetricsResponse {
+ // A list of logs-based metrics.
+ repeated LogMetric metrics = 1;
+
+ // If there might be more results than appear in this response, then
+ // `nextPageToken` is included. To get the next set of results, call this
+ // method again using the value of `nextPageToken` as `pageToken`.
+ string next_page_token = 2;
+}
+
+// The parameters to GetLogMetric.
+message GetLogMetricRequest {
+ // The resource name of the desired metric:
+ //
+ // "projects/[PROJECT_ID]/metrics/[METRIC_ID]"
+ string metric_name = 1;
+}
+
+// The parameters to CreateLogMetric.
+message CreateLogMetricRequest {
+ // The resource name of the project in which to create the metric:
+ //
+ // "projects/[PROJECT_ID]"
+ //
+ // The new metric must be provided in the request.
+ string parent = 1;
+
+ // The new logs-based metric, which must not have an identifier that
+ // already exists.
+ LogMetric metric = 2;
+}
+
+// The parameters to UpdateLogMetric.
+message UpdateLogMetricRequest {
+ // The resource name of the metric to update:
+ //
+ // "projects/[PROJECT_ID]/metrics/[METRIC_ID]"
+ //
+ // The updated metric must be provided in the request and it's
+ // `name` field must be the same as `[METRIC_ID]` If the metric
+ // does not exist in `[PROJECT_ID]`, then a new metric is created.
+ string metric_name = 1;
+
+ // The updated metric.
+ LogMetric metric = 2;
+}
+
+// The parameters to DeleteLogMetric.
+message DeleteLogMetricRequest {
+ // The resource name of the metric to delete:
+ //
+ // "projects/[PROJECT_ID]/metrics/[METRIC_ID]"
+ string metric_name = 1;
+}
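
Editor's note: the LogMetric.name comment above distinguishes the raw metric identifier from its URL-encoded form in the `metric_name` path parameter. A minimal Python sketch of building the encoded REST path for GetLogMetric, assuming the public logging.googleapis.com endpoint; the project id and metric id are made-up values.

from urllib.parse import quote

project_id = "my-project"      # hypothetical project
metric_id = "nginx/requests"   # raw identifier; the '/' must be percent-encoded in the URL

# GET /v2/{metric_name=projects/*/metrics/*}
metric_name = f"projects/{project_id}/metrics/{quote(metric_id, safe='')}"
url = f"https://logging.googleapis.com/v2/{metric_name}"
print(url)  # .../v2/projects/my-project/metrics/nginx%2Frequests
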
diff --git a/third_party/googleapis/google/longrunning/README.md b/third_party/googleapis/google/longrunning/README.md
new file mode 100644
index 0000000000..905642277f
--- /dev/null
+++ b/third_party/googleapis/google/longrunning/README.md
@@ -0,0 +1,5 @@
+# Google Long Running Operations API
+
+This package contains the definition of an abstract interface that
+manages long running operations with API services. See
+[google.longrunning.Operations][] for details. \ No newline at end of file
diff --git a/third_party/googleapis/google/longrunning/longrunning.yaml b/third_party/googleapis/google/longrunning/longrunning.yaml
new file mode 100644
index 0000000000..74543bc0aa
--- /dev/null
+++ b/third_party/googleapis/google/longrunning/longrunning.yaml
@@ -0,0 +1,12 @@
+type: google.api.Service
+config_version: 1
+name: longrunning.googleapis.com
+title: Google Long Running Operations API
+
+apis:
+- name: google.longrunning.Operations
+
+# Documentation section
+documentation:
+ overview:
+ (== include google/longrunning/README.md ==)
diff --git a/third_party/googleapis/google/longrunning/longrunning_gapic.yaml b/third_party/googleapis/google/longrunning/longrunning_gapic.yaml
new file mode 100644
index 0000000000..4d36e5122f
--- /dev/null
+++ b/third_party/googleapis/google/longrunning/longrunning_gapic.yaml
@@ -0,0 +1,98 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.longrunning
+ python:
+ package_name: google.gapic.longrunning
+ go:
+ package_name: cloud.google.com/go/longrunning/autogen
+ domain_layer_location: cloud.google.com/go/longrunning
+ license_header_override:
+ license_file: license-header-apache-2.0.txt
+ csharp:
+ package_name: Google.LongRunning
+ ruby:
+ package_name: Google::Longrunning
+ php:
+ package_name: Google\GAX\LongRunning
+ nodejs:
+ package_name: longrunning
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-bsd-3-clause.txt
+interfaces:
+- name: google.longrunning.Operations
+ required_constructor_params:
+ - service_address
+ - scopes
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: GetOperation
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: ListOperations
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - filter
+ required_fields:
+ - name
+ - filter
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: operations
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: CancelOperation
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: DeleteOperation
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
diff --git a/third_party/googleapis/google/longrunning/operations.proto b/third_party/googleapis/google/longrunning/operations.proto
new file mode 100644
index 0000000000..2fb7a31ad7
--- /dev/null
+++ b/third_party/googleapis/google/longrunning/operations.proto
@@ -0,0 +1,159 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.longrunning;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.LongRunning";
+option go_package = "google.golang.org/genproto/googleapis/longrunning;longrunning";
+option java_multiple_files = true;
+option java_outer_classname = "OperationsProto";
+option java_package = "com.google.longrunning";
+
+
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes a long time to complete, it can be designed
+// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+// interface to receive the real response asynchronously by polling the
+// operation resource, or pass the operation resource to another API (such as
+// Google Cloud Pub/Sub API) to receive the response. Any API service that
+// returns long-running operations should implement the `Operations` interface
+// so developers can have a consistent client experience.
+service Operations {
+ // Lists operations that match the specified filter in the request. If the
+ // server doesn't support this method, it returns `UNIMPLEMENTED`.
+ //
+ // NOTE: the `name` binding below allows API services to override the binding
+ // to use different resource name schemes, such as `users/*/operations`.
+ rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
+ option (google.api.http) = { get: "/v1/{name=operations}" };
+ }
+
+ // Gets the latest state of a long-running operation. Clients can use this
+ // method to poll the operation result at intervals as recommended by the API
+ // service.
+ rpc GetOperation(GetOperationRequest) returns (Operation) {
+ option (google.api.http) = { get: "/v1/{name=operations/**}" };
+ }
+
+ // Deletes a long-running operation. This method indicates that the client is
+ // no longer interested in the operation result. It does not cancel the
+ // operation. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`.
+ rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=operations/**}" };
+ }
+
+ // Starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not
+ // guaranteed. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // corresponding to `Code.CANCELLED`.
+ rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
+ }
+}
+
+// This resource represents a long-running operation that is the result of a
+// network API call.
+message Operation {
+ // The server-assigned name, which is only unique within the same service that
+ // originally returns it. If you use the default HTTP mapping, the
+ // `name` should have the format of `operations/some/unique/name`.
+ string name = 1;
+
+ // Service-specific metadata associated with the operation. It typically
+ // contains progress information and common metadata such as create time.
+ // Some services might not provide such metadata. Any method that returns a
+ // long-running operation should document the metadata type, if any.
+ google.protobuf.Any metadata = 2;
+
+ // If the value is `false`, it means the operation is still in progress.
+ // If true, the operation is completed, and either `error` or `response` is
+ // available.
+ bool done = 3;
+
+ // The operation result, which can be either an `error` or a valid `response`.
+ // If `done` == `false`, neither `error` nor `response` is set.
+ // If `done` == `true`, exactly one of `error` or `response` is set.
+ oneof result {
+ // The error result of the operation in case of failure or cancellation.
+ google.rpc.Status error = 4;
+
+ // The normal response of the operation in case of success. If the original
+ // method returns no data on success, such as `Delete`, the response is
+ // `google.protobuf.Empty`. If the original method is standard
+ // `Get`/`Create`/`Update`, the response should be the resource. For other
+ // methods, the response should have the type `XxxResponse`, where `Xxx`
+ // is the original method name. For example, if the original method name
+ // is `TakeSnapshot()`, the inferred response type is
+ // `TakeSnapshotResponse`.
+ google.protobuf.Any response = 5;
+ }
+}
+
+// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
+message GetOperationRequest {
+ // The name of the operation resource.
+ string name = 1;
+}
+
+// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsRequest {
+ // The name of the operation collection.
+ string name = 4;
+
+ // The standard list filter.
+ string filter = 1;
+
+ // The standard list page size.
+ int32 page_size = 2;
+
+ // The standard list page token.
+ string page_token = 3;
+}
+
+// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsResponse {
+  // A list of operations that match the specified filter in the request.
+ repeated Operation operations = 1;
+
+ // The standard List next-page token.
+ string next_page_token = 2;
+}
+
+// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
+message CancelOperationRequest {
+ // The name of the operation resource to be cancelled.
+ string name = 1;
+}
+
+// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
+message DeleteOperationRequest {
+ // The name of the operation resource to be deleted.
+ string name = 1;
+}
+
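
Editor's note: the Operations service above is meant to be polled until `done` is true, at which point exactly one of `error` or `response` is set. A rough Python sketch of that polling pattern is below; `stub` is assumed to be a gRPC OperationsStub generated from this proto, `operations_pb2` the corresponding generated message module, and the interval and deadline values are arbitrary.

import time
from google.longrunning import operations_pb2  # generated from the proto above

def wait_for_operation(stub, name, poll_interval_s=1.0, timeout_s=300.0):
    """Poll Operations.GetOperation until done, then return the Operation message."""
    deadline = time.monotonic() + timeout_s
    while True:
        # GetOperationRequest carries only the operation resource name.
        op = stub.GetOperation(operations_pb2.GetOperationRequest(name=name))
        if op.done:
            # Exactly one of `error` / `response` is set in the `result` oneof.
            if op.HasField("error"):
                raise RuntimeError(f"operation failed: {op.error.code} {op.error.message}")
            return op
        if time.monotonic() > deadline:
            raise TimeoutError(f"operation {name} did not finish within {timeout_s}s")
        time.sleep(poll_interval_s)
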
diff --git a/third_party/googleapis/google/monitoring/monitoring.yaml b/third_party/googleapis/google/monitoring/monitoring.yaml
new file mode 100644
index 0000000000..44bd40b1af
--- /dev/null
+++ b/third_party/googleapis/google/monitoring/monitoring.yaml
@@ -0,0 +1,83 @@
+type: google.api.Service
+config_version: 2
+name: monitoring.googleapis.com
+title: Stackdriver Monitoring API
+
+apis:
+- name: google.monitoring.v3.GroupService
+- name: google.monitoring.v3.MetricService
+
+documentation:
+ summary: >
+ Manages your Stackdriver Monitoring data and configurations.
+ Most projects must be associated with a Stackdriver account, with a few
+ exceptions as noted on the individual method pages.
+ rules:
+ - selector: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors
+ description: >
+ Lists monitored resource descriptors that match a filter.
+ This method does not require a Stackdriver account.
+ - selector: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor
+ description: >
+ Gets a single monitored resource descriptor.
+ This method does not require a Stackdriver account.
+ - selector: google.monitoring.v3.MetricService.ListMetricDescriptors
+ description: >
+ Lists metric descriptors that match a filter.
+ This method does not require a Stackdriver account.
+ - selector: google.monitoring.v3.MetricService.GetMetricDescriptor
+ description: >
+ Gets a single metric descriptor.
+ This method does not require a Stackdriver account.
+ - selector: google.monitoring.v3.MetricService.ListTimeSeries
+ description: >
+ Lists time series that match a filter.
+ This method does not require a Stackdriver account.
+
+authentication:
+ rules:
+ # Query functions that are also required by publish-only users.
+ - selector: |-
+ google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors,
+ google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor,
+ google.monitoring.v3.MetricService.ListMetricDescriptors,
+ google.monitoring.v3.MetricService.GetMetricDescriptor
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/monitoring,
+ https://www.googleapis.com/auth/monitoring.read,
+ https://www.googleapis.com/auth/monitoring.write
+
+ # Write operations that are allowed for publish-only users.
+ - selector: |-
+ google.monitoring.v3.MetricService.CreateMetricDescriptor,
+ google.monitoring.v3.MetricService.CreateTimeSeries
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/monitoring,
+ https://www.googleapis.com/auth/monitoring.write
+
+ # Query operations that are *not* permitted by publish-only users.
+ - selector: |-
+ google.monitoring.v3.GroupService.ListGroups,
+ google.monitoring.v3.GroupService.GetGroup,
+ google.monitoring.v3.GroupService.ListGroupMembers,
+ google.monitoring.v3.MetricService.ListTimeSeries
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/monitoring,
+ https://www.googleapis.com/auth/monitoring.read
+
+ # Operations that require full access.
+ - selector: |-
+ google.monitoring.v3.GroupService.CreateGroup,
+ google.monitoring.v3.GroupService.UpdateGroup,
+ google.monitoring.v3.GroupService.DeleteGroup,
+ google.monitoring.v3.MetricService.DeleteMetricDescriptor
+ oauth:
+ canonical_scopes: |-
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/monitoring
diff --git a/third_party/googleapis/google/monitoring/v3/common.proto b/third_party/googleapis/google/monitoring/v3/common.proto
new file mode 100644
index 0000000000..c8f701eeaa
--- /dev/null
+++ b/third_party/googleapis/google/monitoring/v3/common.proto
@@ -0,0 +1,323 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.monitoring.v3;
+
+import "google/api/distribution.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.Monitoring.V3";
+option go_package = "google.golang.org/genproto/googleapis/monitoring/v3;monitoring";
+option java_multiple_files = true;
+option java_outer_classname = "CommonProto";
+option java_package = "com.google.monitoring.v3";
+
+
+// A single strongly-typed value.
+message TypedValue {
+ // The typed value field.
+ oneof value {
+ // A Boolean value: `true` or `false`.
+ bool bool_value = 1;
+
+ // A 64-bit integer. Its range is approximately &plusmn;9.2x10<sup>18</sup>.
+ int64 int64_value = 2;
+
+ // A 64-bit double-precision floating-point number. Its magnitude
+ // is approximately &plusmn;10<sup>&plusmn;300</sup> and it has 16
+ // significant digits of precision.
+ double double_value = 3;
+
+ // A variable-length string value.
+ string string_value = 4;
+
+ // A distribution value.
+ google.api.Distribution distribution_value = 5;
+ }
+}
+
+// A time interval extending just after a start time through an end time.
+// If the start time is the same as the end time, then the interval
+// represents a single point in time.
+message TimeInterval {
+ // Required. The end of the time interval.
+ google.protobuf.Timestamp end_time = 2;
+
+ // Optional. The beginning of the time interval. The default value
+ // for the start time is the end time. The start time must not be
+ // later than the end time.
+ google.protobuf.Timestamp start_time = 1;
+}
+
+// Describes how to combine multiple time series to provide different views of
+// the data. Aggregation consists of an alignment step on individual time
+// series (`per_series_aligner`) followed by an optional reduction of the data
+// across different time series (`cross_series_reducer`). For more details, see
+// [Aggregation](/monitoring/api/learn_more#aggregation).
+message Aggregation {
+ // The Aligner describes how to bring the data points in a single
+ // time series into temporal alignment.
+ enum Aligner {
+ // No alignment. Raw data is returned. Not valid if cross-time
+ // series reduction is requested. The value type of the result is
+ // the same as the value type of the input.
+ ALIGN_NONE = 0;
+
+ // Align and convert to delta metric type. This alignment is valid
+ // for cumulative metrics and delta metrics. Aligning an existing
+ // delta metric to a delta metric requires that the alignment
+ // period be increased. The value type of the result is the same
+ // as the value type of the input.
+ ALIGN_DELTA = 1;
+
+ // Align and convert to a rate. This alignment is valid for
+ // cumulative metrics and delta metrics with numeric values. The output is a
+ // gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_RATE = 2;
+
+ // Align by interpolating between adjacent points around the
+ // period boundary. This alignment is valid for gauge
+ // metrics with numeric values. The value type of the result is the same
+ // as the value type of the input.
+ ALIGN_INTERPOLATE = 3;
+
+ // Align by shifting the oldest data point before the period
+ // boundary to the boundary. This alignment is valid for gauge
+ // metrics. The value type of the result is the same as the
+ // value type of the input.
+ ALIGN_NEXT_OLDER = 4;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the minimum of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // values. The value type of the result is the same as the value
+ // type of the input.
+ ALIGN_MIN = 10;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the maximum of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // values. The value type of the result is the same as the value
+ // type of the input.
+ ALIGN_MAX = 11;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the average or arithmetic mean of all
+ // data points in the period. This alignment is valid for gauge and delta
+ // metrics with numeric values. The value type of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_MEAN = 12;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the count of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // or Boolean values. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ ALIGN_COUNT = 13;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the sum of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // and distribution values. The value type of the output is the
+ // same as the value type of the input.
+ ALIGN_SUM = 14;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the standard deviation of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with numeric values. The value type of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_STDDEV = 15;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the count of True-valued data points in the
+ // period. This alignment is valid for gauge metrics with
+ // Boolean values. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ ALIGN_COUNT_TRUE = 16;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the fraction of True-valued data points in the
+ // period. This alignment is valid for gauge metrics with Boolean values.
+ // The output value is in the range [0, 1] and has value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_FRACTION_TRUE = 17;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 99th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_PERCENTILE_99 = 18;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 95th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_PERCENTILE_95 = 19;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 50th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_PERCENTILE_50 = 20;
+
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 5th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ ALIGN_PERCENTILE_05 = 21;
+ }
+
+ // A Reducer describes how to aggregate data points from multiple
+ // time series into a single time series.
+ enum Reducer {
+ // No cross-time series reduction. The output of the aligner is
+ // returned.
+ REDUCE_NONE = 0;
+
+ // Reduce by computing the mean across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric or distribution values. The value type of the
+ // output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ REDUCE_MEAN = 1;
+
+ // Reduce by computing the minimum across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric values. The value type of the output
+ // is the same as the value type of the input.
+ REDUCE_MIN = 2;
+
+ // Reduce by computing the maximum across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric values. The value type of the output
+ // is the same as the value type of the input.
+ REDUCE_MAX = 3;
+
+ // Reduce by computing the sum across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric and distribution values. The value type of
+ // the output is the same as the value type of the input.
+ REDUCE_SUM = 4;
+
+ // Reduce by computing the standard deviation across time series
+ // for each alignment period. This reducer is valid for delta
+ // and gauge metrics with numeric or distribution values. The value type of
+ // the output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ REDUCE_STDDEV = 5;
+
+ // Reduce by computing the count of data points across time series
+ // for each alignment period. This reducer is valid for delta
+ // and gauge metrics of numeric, Boolean, distribution, and string value
+ // type. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ REDUCE_COUNT = 6;
+
+ // Reduce by computing the count of True-valued data points across time
+ // series for each alignment period. This reducer is valid for delta
+ // and gauge metrics of Boolean value type. The value type of
+ // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ REDUCE_COUNT_TRUE = 7;
+
+ // Reduce by computing the fraction of True-valued data points across time
+ // series for each alignment period. This reducer is valid for delta
+ // and gauge metrics of Boolean value type. The output value is in the
+ // range [0, 1] and has value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ REDUCE_FRACTION_TRUE = 8;
+
+ // Reduce by computing 99th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ REDUCE_PERCENTILE_99 = 9;
+
+ // Reduce by computing 95th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ REDUCE_PERCENTILE_95 = 10;
+
+ // Reduce by computing 50th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ REDUCE_PERCENTILE_50 = 11;
+
+ // Reduce by computing 5th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ REDUCE_PERCENTILE_05 = 12;
+ }
+
+ // The alignment period for per-[time series][google.monitoring.v3.TimeSeries]
+ // alignment. If present, `alignmentPeriod` must be at least 60
+ // seconds. After per-time series alignment, each time series will
+ // contain data points only on the period boundaries. If
+ // `perSeriesAligner` is not specified or equals `ALIGN_NONE`, then
+ // this field is ignored. If `perSeriesAligner` is specified and
+ // does not equal `ALIGN_NONE`, then this field must be defined;
+ // otherwise an error is returned.
+ google.protobuf.Duration alignment_period = 1;
+
+ // The approach to be used to align individual time series. Not all
+ // alignment functions may be applied to all time series, depending
+ // on the metric type and value type of the original time
+ // series. Alignment may change the metric type or the value type of
+ // the time series.
+ //
+ // Time series data must be aligned in order to perform cross-time
+ // series reduction. If `crossSeriesReducer` is specified, then
+ // `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
+ // and `alignmentPeriod` must be specified; otherwise, an error is
+ // returned.
+ Aligner per_series_aligner = 2;
+
+ // The approach to be used to combine time series. Not all reducer
+ // functions may be applied to all time series, depending on the
+ // metric type and the value type of the original time
+  // series. Reduction may change the metric type or value type of the
+ // time series.
+ //
+ // Time series data must be aligned in order to perform cross-time
+ // series reduction. If `crossSeriesReducer` is specified, then
+ // `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
+ // and `alignmentPeriod` must be specified; otherwise, an error is
+ // returned.
+ Reducer cross_series_reducer = 4;
+
+ // The set of fields to preserve when `crossSeriesReducer` is
+ // specified. The `groupByFields` determine how the time series are
+ // partitioned into subsets prior to applying the aggregation
+ // function. Each subset contains time series that have the same
+ // value for each of the grouping fields. Each individual time
+ // series is a member of exactly one subset. The
+ // `crossSeriesReducer` is applied to each subset of time series.
+ // It is not possible to reduce across different resource types, so
+ // this field implicitly contains `resource.type`. Fields not
+ // specified in `groupByFields` are aggregated away. If
+ // `groupByFields` is not specified and all the time series have
+ // the same resource type, then the time series are aggregated into
+ // a single output time series. If `crossSeriesReducer` is not
+ // defined, this field is ignored.
+ repeated string group_by_fields = 5;
+}
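
Editor's note: the Aggregation message above combines a per-series aligner, an optional cross-series reducer, and grouping fields. The Python sketch below builds one plausible aggregation (60-second means per series, then a mean across series per grouping key) as a plain dict in the proto3 JSON form of the message, with field names in lowerCamelCase; the grouping label path is a hypothetical example, not taken from this file.

# Align each time series to 60 s means, then average across series per group.
aggregation = {
    "alignmentPeriod": "60s",                    # google.protobuf.Duration in JSON form; minimum 60 s
    "perSeriesAligner": "ALIGN_MEAN",
    "crossSeriesReducer": "REDUCE_MEAN",
    "groupByFields": ["resource.label.zone"],    # hypothetical label path used only for illustration
}
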
diff --git a/third_party/googleapis/google/monitoring/v3/group.proto b/third_party/googleapis/google/monitoring/v3/group.proto
new file mode 100644
index 0000000000..b6a6cfc45c
--- /dev/null
+++ b/third_party/googleapis/google/monitoring/v3/group.proto
@@ -0,0 +1,74 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.monitoring.v3;
+
+option csharp_namespace = "Google.Cloud.Monitoring.V3";
+option go_package = "google.golang.org/genproto/googleapis/monitoring/v3;monitoring";
+option java_multiple_files = true;
+option java_outer_classname = "GroupProto";
+option java_package = "com.google.monitoring.v3";
+
+
+// The description of a dynamic collection of monitored resources. Each group
+// has a filter that is matched against monitored resources and their associated
+// metadata. If a group's filter matches an available monitored resource, then
+// that resource is a member of that group. Groups can contain any number of
+// monitored resources, and each monitored resource can be a member of any
+// number of groups.
+//
+// Groups can be nested in parent-child hierarchies. The `parentName` field
+// identifies an optional parent for each group. If a group has a parent, then
+// the only monitored resources available to be matched by the group's filter
+// are the resources contained in the parent group. In other words, a group
+// contains the monitored resources that match its filter and the filters of all
+// the group's ancestors. A group without a parent can contain any monitored
+// resource.
+//
+// For example, consider an infrastructure running a set of instances with two
+// user-defined tags: `"environment"` and `"role"`. A parent group has a filter,
+// `environment="production"`. A child of that parent group has a filter,
+// `role="transcoder"`. The parent group contains all instances in the
+// production environment, regardless of their roles. The child group contains
+// instances that have the transcoder role *and* are in the production
+// environment.
+//
+// The monitored resources contained in a group can change at any moment,
+// depending on what resources exist and what filters are associated with the
+// group and its ancestors.
+message Group {
+ // Output only. The name of this group. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ // When creating a group, this field is ignored and a new name is created
+ // consisting of the project specified in the call to `CreateGroup`
+ // and a unique `{group_id}` that is generated automatically.
+ string name = 1;
+
+ // A user-assigned name for this group, used only for display purposes.
+ string display_name = 2;
+
+ // The name of the group's parent, if it has one.
+ // The format is `"projects/{project_id_or_number}/groups/{group_id}"`.
+ // For groups with no parent, `parentName` is the empty string, `""`.
+ string parent_name = 3;
+
+ // The filter used to determine which monitored resources belong to this group.
+ string filter = 5;
+
+ // If true, the members of this group are considered to be a cluster.
+ // The system can perform additional analysis on groups that are clusters.
+ bool is_cluster = 6;
+}
diff --git a/third_party/googleapis/google/monitoring/v3/group_service.proto b/third_party/googleapis/google/monitoring/v3/group_service.proto
new file mode 100644
index 0000000000..3450580cd8
--- /dev/null
+++ b/third_party/googleapis/google/monitoring/v3/group_service.proto
@@ -0,0 +1,206 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.monitoring.v3;
+
+import "google/api/annotations.proto";
+import "google/api/monitored_resource.proto";
+import "google/monitoring/v3/common.proto";
+import "google/monitoring/v3/group.proto";
+import "google/protobuf/empty.proto";
+
+option csharp_namespace = "Google.Cloud.Monitoring.V3";
+option go_package = "google.golang.org/genproto/googleapis/monitoring/v3;monitoring";
+option java_multiple_files = true;
+option java_outer_classname = "GroupServiceProto";
+option java_package = "com.google.monitoring.v3";
+
+
+// The Group API lets you inspect and manage your
+// [groups](google.monitoring.v3.Group).
+//
+// A group is a named filter that is used to identify
+// a collection of monitored resources. Groups are typically used to
+// mirror the physical and/or logical topology of the environment.
+// Because group membership is computed dynamically, monitored
+// resources that are started in the future are automatically placed
+// in matching groups. By using a group to name monitored resources in,
+// for example, an alert policy, the target of that alert policy is
+// updated automatically as monitored resources are added and removed
+// from the infrastructure.
+service GroupService {
+ // Lists the existing groups.
+ rpc ListGroups(ListGroupsRequest) returns (ListGroupsResponse) {
+ option (google.api.http) = { get: "/v3/{name=projects/*}/groups" };
+ }
+
+ // Gets a single group.
+ rpc GetGroup(GetGroupRequest) returns (Group) {
+ option (google.api.http) = { get: "/v3/{name=projects/*/groups/*}" };
+ }
+
+ // Creates a new group.
+ rpc CreateGroup(CreateGroupRequest) returns (Group) {
+ option (google.api.http) = { post: "/v3/{name=projects/*}/groups" body: "group" };
+ }
+
+ // Updates an existing group.
+ // You can change any group attributes except `name`.
+ rpc UpdateGroup(UpdateGroupRequest) returns (Group) {
+ option (google.api.http) = { put: "/v3/{group.name=projects/*/groups/*}" body: "group" };
+ }
+
+ // Deletes an existing group.
+ rpc DeleteGroup(DeleteGroupRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v3/{name=projects/*/groups/*}" };
+ }
+
+ // Lists the monitored resources that are members of a group.
+ rpc ListGroupMembers(ListGroupMembersRequest) returns (ListGroupMembersResponse) {
+ option (google.api.http) = { get: "/v3/{name=projects/*/groups/*}/members" };
+ }
+}
+
+// The `ListGroups` request.
+message ListGroupsRequest {
+ // The project whose groups are to be listed. The format is
+ // `"projects/{project_id_or_number}"`.
+ string name = 7;
+
+  // An optional filter consisting of a single group name. The filter limits the
+ // groups returned based on their parent-child relationship with the specified
+ // group. If no filter is specified, all groups are returned.
+ oneof filter {
+ // A group name: `"projects/{project_id_or_number}/groups/{group_id}"`.
+ // Returns groups whose `parentName` field contains the group
+ // name. If no groups have this parent, the results are empty.
+ string children_of_group = 2;
+
+ // A group name: `"projects/{project_id_or_number}/groups/{group_id}"`.
+ // Returns groups that are ancestors of the specified group.
+ // The groups are returned in order, starting with the immediate parent and
+ // ending with the most distant ancestor. If the specified group has no
+ // immediate parent, the results are empty.
+ string ancestors_of_group = 3;
+
+ // A group name: `"projects/{project_id_or_number}/groups/{group_id}"`.
+ // Returns the descendants of the specified group. This is a superset of
+ // the results returned by the `childrenOfGroup` filter, and includes
+ // children-of-children, and so forth.
+ string descendants_of_group = 4;
+ }
+
+ // A positive number that is the maximum number of results to return.
+ int32 page_size = 5;
+
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ string page_token = 6;
+}
+
+// The `ListGroups` response.
+message ListGroupsResponse {
+ // The groups that match the specified filters.
+ repeated Group group = 1;
+
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ string next_page_token = 2;
+}
+
+// The `GetGroup` request.
+message GetGroupRequest {
+ // The group to retrieve. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ string name = 3;
+}
+
+// The `CreateGroup` request.
+message CreateGroupRequest {
+ // The project in which to create the group. The format is
+ // `"projects/{project_id_or_number}"`.
+ string name = 4;
+
+ // A group definition. It is an error to define the `name` field because
+ // the system assigns the name.
+ Group group = 2;
+
+ // If true, validate this request but do not create the group.
+ bool validate_only = 3;
+}
+
+// The `UpdateGroup` request.
+message UpdateGroupRequest {
+ // The new definition of the group. All fields of the existing group,
+ // excepting `name`, are replaced with the corresponding fields of this group.
+ Group group = 2;
+
+ // If true, validate this request but do not update the existing group.
+ bool validate_only = 3;
+}
+
+// The `DeleteGroup` request. You can only delete a group if it has no children.
+message DeleteGroupRequest {
+ // The group to delete. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ string name = 3;
+}
+
+// The `ListGroupMembers` request.
+message ListGroupMembersRequest {
+ // The group whose members are listed. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ string name = 7;
+
+ // A positive number that is the maximum number of results to return.
+ int32 page_size = 3;
+
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ string page_token = 4;
+
+ // An optional [list filter](/monitoring/api/learn_more#filtering) describing
+ // the members to be returned. The filter may reference the type, labels, and
+ // metadata of monitored resources that comprise the group.
+ // For example, to return only resources representing Compute Engine VM
+ // instances, use this filter:
+ //
+ // resource.type = "gce_instance"
+ string filter = 5;
+
+ // An optional time interval for which results should be returned. Only
+ // members that were part of the group during the specified interval are
+ // included in the response. If no interval is provided then the group
+ // membership over the last minute is returned.
+ TimeInterval interval = 6;
+}
+
+// The `ListGroupMembers` response.
+message ListGroupMembersResponse {
+ // A set of monitored resources in the group.
+ repeated google.api.MonitoredResource members = 1;
+
+ // If there are more results than have been returned, then this field is
+ // set to a non-empty value. To see the additional results, use that value as
+ // `pageToken` in the next call to this method.
+ string next_page_token = 2;
+
+ // The total number of elements matching this request.
+ int32 total_size = 3;
+}
diff --git a/third_party/googleapis/google/monitoring/v3/metric.proto b/third_party/googleapis/google/monitoring/v3/metric.proto
new file mode 100644
index 0000000000..73ae603749
--- /dev/null
+++ b/third_party/googleapis/google/monitoring/v3/metric.proto
@@ -0,0 +1,87 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.monitoring.v3;
+
+import "google/api/metric.proto";
+import "google/api/monitored_resource.proto";
+import "google/monitoring/v3/common.proto";
+
+option csharp_namespace = "Google.Cloud.Monitoring.V3";
+option go_package = "google.golang.org/genproto/googleapis/monitoring/v3;monitoring";
+option java_multiple_files = true;
+option java_outer_classname = "MetricProto";
+option java_package = "com.google.monitoring.v3";
+
+
+// A single data point in a time series.
+message Point {
+ // The time interval to which the data point applies. For GAUGE metrics, only
+ // the end time of the interval is used. For DELTA metrics, the start and end
+ // time should specify a non-zero interval, with subsequent points specifying
+ // contiguous and non-overlapping intervals. For CUMULATIVE metrics, the
+ // start and end time should specify a non-zero interval, with subsequent
+ // points specifying the same start time and increasing end times, until an
+ // event resets the cumulative value to zero and sets a new start time for the
+ // following points.
+ TimeInterval interval = 1;
+
+ // The value of the data point.
+ TypedValue value = 2;
+}
+
+// A collection of data points that describes the time-varying values
+// of a metric. A time series is identified by a combination of a
+// fully-specified monitored resource and a fully-specified metric.
+// This type is used for both listing and creating time series.
+message TimeSeries {
+ // The associated metric. A fully-specified metric used to identify the time
+ // series.
+ google.api.Metric metric = 1;
+
+ // The associated resource. A fully-specified monitored resource used to
+ // identify the time series.
+ google.api.MonitoredResource resource = 2;
+
+ // The metric kind of the time series. When listing time series, this metric
+ // kind might be different from the metric kind of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the metric kind of the associated metric. If the associated
+ // metric's descriptor must be auto-created, then this field specifies the
+ // metric kind of the new descriptor and must be either `GAUGE` (the default)
+ // or `CUMULATIVE`.
+ google.api.MetricDescriptor.MetricKind metric_kind = 3;
+
+ // The value type of the time series. When listing time series, this value
+ // type might be different from the value type of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the type of the data in the `points` field.
+ google.api.MetricDescriptor.ValueType value_type = 4;
+
+ // The data points of this time series. When listing time series, the order of
+ // the points is specified by the list method.
+ //
+ // When creating a time series, this field must contain exactly one point and
+ // the point's type must be the same as the value type of the associated
+ // metric. If the associated metric's descriptor must be auto-created, then
+ // the value type of the descriptor is determined by the point's type, which
+ // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.
+ repeated Point points = 5;
+}
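
Editor's note: per the comments above, a TimeSeries written via CreateTimeSeries must contain exactly one point whose type matches the metric's value type, and its metric kind must be GAUGE or CUMULATIVE. The Python sketch below shows one such payload in the proto3 JSON form; the custom metric type, labels, and project id are made-up, and only the shape follows the messages defined above.

import time

now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())  # RFC 3339 timestamp

time_series = {
    "metric": {
        "type": "custom.googleapis.com/my_app/request_count",  # hypothetical custom metric
        "labels": {"status": "ok"},
    },
    "resource": {
        "type": "global",                           # a simple monitored resource type
        "labels": {"project_id": "my-project"},     # hypothetical project
    },
    "metricKind": "GAUGE",
    "valueType": "INT64",
    "points": [
        {
            # GAUGE metrics only use the end time of the interval (see Point above).
            "interval": {"endTime": now},
            "value": {"int64Value": "42"},  # int64 values are encoded as strings in proto3 JSON
        }
    ],
}
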
diff --git a/third_party/googleapis/google/monitoring/v3/metric_service.proto b/third_party/googleapis/google/monitoring/v3/metric_service.proto
new file mode 100644
index 0000000000..0dd0b19d31
--- /dev/null
+++ b/third_party/googleapis/google/monitoring/v3/metric_service.proto
@@ -0,0 +1,286 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.monitoring.v3;
+
+import "google/api/annotations.proto";
+import "google/api/metric.proto";
+import "google/api/monitored_resource.proto";
+import "google/monitoring/v3/common.proto";
+import "google/monitoring/v3/metric.proto";
+import "google/protobuf/empty.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.Cloud.Monitoring.V3";
+option go_package = "google.golang.org/genproto/googleapis/monitoring/v3;monitoring";
+option java_multiple_files = true;
+option java_outer_classname = "MetricServiceProto";
+option java_package = "com.google.monitoring.v3";
+
+
+// Manages metric descriptors, monitored resource descriptors, and
+// time series data.
+service MetricService {
+ // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
+ rpc ListMonitoredResourceDescriptors(ListMonitoredResourceDescriptorsRequest) returns (ListMonitoredResourceDescriptorsResponse) {
+ option (google.api.http) = { get: "/v3/{name=projects/*}/monitoredResourceDescriptors" };
+ }
+
+ // Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
+ rpc GetMonitoredResourceDescriptor(GetMonitoredResourceDescriptorRequest) returns (google.api.MonitoredResourceDescriptor) {
+ option (google.api.http) = { get: "/v3/{name=projects/*/monitoredResourceDescriptors/*}" };
+ }
+
+ // Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
+ rpc ListMetricDescriptors(ListMetricDescriptorsRequest) returns (ListMetricDescriptorsResponse) {
+ option (google.api.http) = { get: "/v3/{name=projects/*}/metricDescriptors" };
+ }
+
+ // Gets a single metric descriptor. This method does not require a Stackdriver account.
+ rpc GetMetricDescriptor(GetMetricDescriptorRequest) returns (google.api.MetricDescriptor) {
+ option (google.api.http) = { get: "/v3/{name=projects/*/metricDescriptors/**}" };
+ }
+
+ // Creates a new metric descriptor.
+ // User-created metric descriptors define
+ // [custom metrics](/monitoring/custom-metrics).
+ rpc CreateMetricDescriptor(CreateMetricDescriptorRequest) returns (google.api.MetricDescriptor) {
+ option (google.api.http) = { post: "/v3/{name=projects/*}/metricDescriptors" body: "metric_descriptor" };
+ }
+
+ // Deletes a metric descriptor. Only user-created
+ // [custom metrics](/monitoring/custom-metrics) can be deleted.
+ rpc DeleteMetricDescriptor(DeleteMetricDescriptorRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v3/{name=projects/*/metricDescriptors/**}" };
+ }
+
+ // Lists time series that match a filter. This method does not require a Stackdriver account.
+ rpc ListTimeSeries(ListTimeSeriesRequest) returns (ListTimeSeriesResponse) {
+ option (google.api.http) = { get: "/v3/{name=projects/*}/timeSeries" };
+ }
+
+ // Creates or adds data to one or more time series.
+ // The response is empty if all time series in the request were written.
+ // If any time series could not be written, a corresponding failure message is
+ // included in the error response.
+ rpc CreateTimeSeries(CreateTimeSeriesRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v3/{name=projects/*}/timeSeries" body: "*" };
+ }
+}
+
+// The `ListMonitoredResourceDescriptors` request.
+message ListMonitoredResourceDescriptorsRequest {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ string name = 5;
+
+ // An optional [filter](/monitoring/api/v3/filters) describing
+ // the descriptors to be returned. The filter can reference
+ // the descriptor's type and labels. For example, the
+ // following filter returns only Google Compute Engine descriptors
+ // that have an `id` label:
+ //
+ // resource.type = starts_with("gce_") AND resource.label:id
+ string filter = 2;
+
+ // A positive number that is the maximum number of results to return.
+ int32 page_size = 3;
+
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ string page_token = 4;
+}
+
+// The `ListMonitoredResourceDescriptors` response.
+message ListMonitoredResourceDescriptorsResponse {
+ // The monitored resource descriptors that are available to this project
+ // and that match `filter`, if present.
+ repeated google.api.MonitoredResourceDescriptor resource_descriptors = 1;
+
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ string next_page_token = 2;
+}
+
+// The `GetMonitoredResourceDescriptor` request.
+message GetMonitoredResourceDescriptorRequest {
+ // The monitored resource descriptor to get. The format is
+ // `"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}"`.
+ // The `{resource_type}` is a predefined type, such as
+ // `cloudsql_database`.
+ string name = 3;
+}
+
+// The `ListMetricDescriptors` request.
+message ListMetricDescriptorsRequest {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ string name = 5;
+
+ // If this field is empty, all custom and
+ // system-defined metric descriptors are returned.
+ // Otherwise, the [filter](/monitoring/api/v3/filters)
+ // specifies which metric descriptors are to be
+ // returned. For example, the following filter matches all
+ // [custom metrics](/monitoring/custom-metrics):
+ //
+ // metric.type = starts_with("custom.googleapis.com/")
+ string filter = 2;
+
+ // A positive number that is the maximum number of results to return.
+ int32 page_size = 3;
+
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ string page_token = 4;
+}
+
+// The `ListMetricDescriptors` response.
+message ListMetricDescriptorsResponse {
+ // The metric descriptors that are available to the project
+ // and that match the value of `filter`, if present.
+ repeated google.api.MetricDescriptor metric_descriptors = 1;
+
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ string next_page_token = 2;
+}
+
+// The `GetMetricDescriptor` request.
+message GetMetricDescriptorRequest {
+ // The metric descriptor on which to execute the request. The format is
+ // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`.
+ // An example value of `{metric_id}` is
+ // `"compute.googleapis.com/instance/disk/read_bytes_count"`.
+ string name = 3;
+}
+
+// The `CreateMetricDescriptor` request.
+message CreateMetricDescriptorRequest {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ string name = 3;
+
+ // The new [custom metric](/monitoring/custom-metrics)
+ // descriptor.
+ google.api.MetricDescriptor metric_descriptor = 2;
+}
+
+// The `DeleteMetricDescriptor` request.
+message DeleteMetricDescriptorRequest {
+ // The metric descriptor on which to execute the request. The format is
+ // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`.
+ // An example of `{metric_id}` is:
+ // `"custom.googleapis.com/my_test_metric"`.
+ string name = 3;
+}
+
+// The `ListTimeSeries` request.
+message ListTimeSeriesRequest {
+ // Controls which fields are returned by `ListTimeSeries`.
+ enum TimeSeriesView {
+ // Returns the identity of the metric(s), the time series,
+ // and the time series data.
+ FULL = 0;
+
+ // Returns the identity of the metric and the time series resource,
+ // but not the time series data.
+ HEADERS = 1;
+ }
+
+ // The project on which to execute the request. The format is
+ // "projects/{project_id_or_number}".
+ string name = 10;
+
+ // A [monitoring filter](/monitoring/api/v3/filters) that specifies which time
+ // series should be returned. The filter must specify a single metric type,
+ // and can additionally specify metric labels and other information. For
+ // example:
+ //
+ // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
+ // metric.label.instance_name = "my-instance-name"
+ string filter = 2;
+
+ // The time interval for which results should be returned. Only time series
+ // that contain data points in the specified interval are included
+ // in the response.
+ TimeInterval interval = 4;
+
+ // By default, the raw time series data is returned.
+ // Use this field to combine multiple time series for different
+ // views of the data.
+ Aggregation aggregation = 5;
+
+ // Specifies the order in which the points of the time series should
+ // be returned. By default, results are not ordered. Currently,
+ // this field must be left blank.
+ string order_by = 6;
+
+ // Specifies which information is returned about the time series.
+ TimeSeriesView view = 7;
+
+ // A positive number that is the maximum number of results to return.
+ // When the `view` field is set to `FULL`, this limits the number of `Points`
+ // the server will return; if the `view` field is `HEADERS`, it limits the
+ // number of `TimeSeries` the server will return.
+ int32 page_size = 8;
+
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ string page_token = 9;
+}
+
+// The `ListTimeSeries` response.
+message ListTimeSeriesResponse {
+ // One or more time series that match the filter included in the request.
+ repeated TimeSeries time_series = 1;
+
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ string next_page_token = 2;
+}
+
+// The `CreateTimeSeries` request.
+message CreateTimeSeriesRequest {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ string name = 3;
+
+ // The new data to be added to a list of time series.
+ // Adds at most one data point to each of several time series. The new data
+ // point must be more recent than any other point in its time series. Each
+ // `TimeSeries` value must fully specify a unique time series by supplying
+ // all label values for the metric and the monitored resource.
+ repeated TimeSeries time_series = 2;
+}
+
+// Describes the result of a failed request to write data to a time series.
+message CreateTimeSeriesError {
+ // The time series, including the `Metric`, `MonitoredResource`,
+ // and `Point`s (including timestamp and value) that resulted
+ // in the error. This field provides all of the context that
+ // would be needed to retry the operation.
+ TimeSeries time_series = 1;
+
+ // The status of the requested write operation.
+ google.rpc.Status status = 2;
+}
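
For `ListTimeSeries`, the request message above carries the filter, interval, and view described in its comments. A hedged sketch of building one with the generated Java classes, assuming `TimeInterval` (defined in `common.proto`, not shown here) exposes `start_time`/`end_time` timestamps; the project id and instance name are placeholders, and the filter string is the example from the proto comments:

import com.google.monitoring.v3.ListTimeSeriesRequest;
import com.google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView;
import com.google.monitoring.v3.TimeInterval;
import com.google.protobuf.Timestamp;

public class ListTimeSeriesSketch {
  // Builds a ListTimeSeriesRequest for one hour of CPU usage data from a
  // single instance, using the filter shown in the comments above.
  static ListTimeSeriesRequest cpuUsageRequest(long nowEpochSeconds) {
    TimeInterval interval = TimeInterval.newBuilder()
        .setStartTime(Timestamp.newBuilder().setSeconds(nowEpochSeconds - 3600))
        .setEndTime(Timestamp.newBuilder().setSeconds(nowEpochSeconds))
        .build();
    return ListTimeSeriesRequest.newBuilder()
        .setName("projects/my-project")
        .setFilter("metric.type = \"compute.googleapis.com/instance/cpu/usage_time\""
            + " AND metric.label.instance_name = \"my-instance-name\"")
        .setInterval(interval)
        .setView(TimeSeriesView.FULL)
        .setPageSize(100)
        .build();
  }
}
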
diff --git a/third_party/googleapis/google/monitoring/v3/monitoring_gapic.yaml b/third_party/googleapis/google/monitoring/v3/monitoring_gapic.yaml
new file mode 100644
index 0000000000..224c44064c
--- /dev/null
+++ b/third_party/googleapis/google/monitoring/v3/monitoring_gapic.yaml
@@ -0,0 +1,371 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.monitoring.spi.v3
+ python:
+ package_name: google.cloud.gapic.monitoring.v3
+ go:
+ package_name: cloud.google.com/go/monitoring/apiv3
+ csharp:
+ package_name: Google.Cloud.Monitoring.V3
+ ruby:
+ package_name: Google::Cloud::Monitoring::V3
+ php:
+ package_name: Google\Cloud\Monitoring\V3
+ nodejs:
+ package_name: monitoring.v3
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+collections:
+- name_pattern: projects/{project}
+ entity_name: project
+- name_pattern: projects/{project}/metricDescriptors/{metric_descriptor=**}
+ entity_name: metric_descriptor
+- name_pattern: projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}
+ entity_name: monitored_resource_descriptor
+- name_pattern: projects/{project}/groups/{group}
+ entity_name: group
+interfaces:
+- name: google.monitoring.v3.GroupService
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/groups/{group}
+ entity_name: group
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: ListGroups
+ # Removing flattening until we figure out how to deal with oneof
+ required_fields:
+ - name
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: group
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: GetGroup
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: group
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: CreateGroup
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - group
+ required_fields:
+ - name
+ - group
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: UpdateGroup
+ flattening:
+ groups:
+ - parameters:
+ - group
+ required_fields:
+ - group
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ group.name: group
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: DeleteGroup
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: group
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: ListGroupMembers
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: members
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: group
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+- name: google.monitoring.v3.MetricService
+ smoke_test:
+ method: ListMonitoredResourceDescriptors
+ init_fields:
+ - name%project=$PROJECT_ID
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/metricDescriptors/{metric_descriptor=**}
+ entity_name: metric_descriptor
+ - name_pattern: projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}
+ entity_name: monitored_resource_descriptor
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: ListMonitoredResourceDescriptors
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: resource_descriptors
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: GetMonitoredResourceDescriptor
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: monitored_resource_descriptor
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: ListMetricDescriptors
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: metric_descriptors
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: GetMetricDescriptor
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: metric_descriptor
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: CreateMetricDescriptor
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - metric_descriptor
+ required_fields:
+ - name
+ - metric_descriptor
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: DeleteMetricDescriptor
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: metric_descriptor
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: ListTimeSeries
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - filter
+ - interval
+ - view
+ required_fields:
+ - name
+ - filter
+ - interval
+ - view
+ request_object_method: true
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: time_series
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+ - name: CreateTimeSeries
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - time_series
+ required_fields:
+ - name
+ - time_series
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: project
+ resource_name_treatment: STATIC_TYPES
+ timeout_millis: 60000
+resource_name_generation:
+- message_name: Group
+ field_entity_map:
+ name: group
+ parent_name: group
+- message_name: ListGroupsRequest
+ field_entity_map:
+ name: project
+ children_of_group: group
+ ancestors_of_group: group
+ descendants_of_group: group
+- message_name: GetGroupRequest
+ field_entity_map:
+ name: group
+- message_name: CreateGroupRequest
+ field_entity_map:
+ name: project
+- message_name: DeleteGroupRequest
+ field_entity_map:
+ name: group
+- message_name: ListGroupMembersRequest
+ field_entity_map:
+ name: group
+- message_name: ListMonitoredResourceDescriptorsRequest
+ field_entity_map:
+ name: project
+- message_name: GetMonitoredResourceDescriptorRequest
+ field_entity_map:
+ name: monitored_resource_descriptor
+- message_name: ListMetricDescriptorsRequest
+ field_entity_map:
+ name: project
+- message_name: GetMetricDescriptorRequest
+ field_entity_map:
+ name: metric_descriptor
+- message_name: CreateMetricDescriptorRequest
+ field_entity_map:
+ name: project
+- message_name: DeleteMetricDescriptorRequest
+ field_entity_map:
+ name: metric_descriptor
+- message_name: ListTimeSeriesRequest
+ field_entity_map:
+ name: project
+- message_name: CreateTimeSeriesRequest
+ field_entity_map:
+ name: project
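
The `retry_params_def` blocks above encode a standard exponential backoff policy: a 100 ms initial delay, 1.3x growth per attempt, a 60 s cap on any single delay, and a 600 s total budget. The sketch below only walks the delay schedule those numbers imply; it is not the GAPIC-generated client, which additionally applies per-RPC timeouts and jitter:

// Prints the retry delay schedule implied by the "default" retry_params_def
// values above. Purely illustrative arithmetic over the config values.
public class RetryScheduleSketch {
  public static void main(String[] args) {
    double delayMillis = 100;                 // initial_retry_delay_millis
    final double multiplier = 1.3;            // retry_delay_multiplier
    final double maxDelayMillis = 60_000;     // max_retry_delay_millis
    final double totalBudgetMillis = 600_000; // total_timeout_millis
    double elapsed = 0;
    for (int attempt = 1; elapsed + delayMillis <= totalBudgetMillis; attempt++) {
      System.out.printf("attempt %d: wait %.0f ms%n", attempt, delayMillis);
      elapsed += delayMillis;
      delayMillis = Math.min(delayMillis * multiplier, maxDelayMillis);
    }
  }
}
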
diff --git a/third_party/googleapis/google/privacy/dlp/README.md b/third_party/googleapis/google/privacy/dlp/README.md
new file mode 100644
index 0000000000..629d72b89d
--- /dev/null
+++ b/third_party/googleapis/google/privacy/dlp/README.md
@@ -0,0 +1,7 @@
+# Google Data Loss Prevention (DLP) API
+
+The Google Data Loss Prevention API provides methods for detection
+of privacy-sensitive fragments in text, images, and Google Cloud
+Platform storage repositories.
+
+Documentation: https://cloud.google.com/dlp/docs
diff --git a/third_party/googleapis/google/privacy/dlp/dlp.yaml b/third_party/googleapis/google/privacy/dlp/dlp.yaml
new file mode 100644
index 0000000000..082a6b7522
--- /dev/null
+++ b/third_party/googleapis/google/privacy/dlp/dlp.yaml
@@ -0,0 +1,59 @@
+# The DLP API Definition.
+
+type: google.api.Service
+config_version: 3
+name: dlp.googleapis.com
+
+title: DLP API
+
+documentation:
+ summary:
+ The Google Data Loss Prevention API provides methods for detection
+ of privacy-sensitive fragments in text, images, and Google Cloud
+ Platform storage repositories.
+ overview: |
+ (== suppress_warning documentation-presence ==)
+ rules:
+ - selector: google.longrunning.ListOperationsRequest
+ description: Fetches the list of long-running operations.
+
+ - selector: google.longrunning.ListOperationsRequest.filter
+ description: This parameter supports filtering by done, i.e. done=true or done=false.
+
+ - selector: google.longrunning.ListOperationsRequest.page_size
+ description: The list page size. The maximum allowed value is 256 and the default is 100.
+
+ - selector: google.longrunning.ListOperationsRequest.page_token
+ description: The list page token.
+
+ - selector: google.longrunning.Operation.name
+ description: >
+ The server-assigned name. The `name`
+ should have the format of `inspect/operations/<identifier>`.
+
+ - selector: google.longrunning.Operation.metadata
+ description: >
+ This field will contain an
+ [`InspectOperationMetadata`][google.privacy.dlp.v2beta1.InspectOperationMetadata] object.
+
+ - selector: google.longrunning.Operation.response
+ description: >
+ This field will contain an
+ [`InspectOperationResult`][google.privacy.dlp.v2beta1.InspectOperationResult] object.
+
+ - selector: google.longrunning.Operations.CancelOperation
+ description: >
+ Cancels an operation. Use the get method to check whether the cancellation
+ succeeded or whether the operation completed despite cancellation.
+
+ - selector: google.longrunning.Operations.DeleteOperation
+ description: This method is not supported and the server returns `UNIMPLEMENTED`.
+
+apis:
+ - name: google.privacy.dlp.v2beta1.DlpService
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/cloud-platform
diff --git a/third_party/googleapis/google/privacy/dlp/v2beta1/dlp.proto b/third_party/googleapis/google/privacy/dlp/v2beta1/dlp.proto
new file mode 100644
index 0000000000..dd28dc7d7e
--- /dev/null
+++ b/third_party/googleapis/google/privacy/dlp/v2beta1/dlp.proto
@@ -0,0 +1,404 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.privacy.dlp.v2beta1;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/privacy/dlp/v2beta1/storage.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1;dlp";
+option java_multiple_files = true;
+option java_outer_classname = "DlpProto";
+option java_package = "com.google.privacy.dlp.v2beta1";
+
+
+// The DLP API is a service that allows clients
+// to detect the presence of Personally Identifiable Information (PII) and other
+// privacy-sensitive data in user-supplied, unstructured data streams, like text
+// blocks or images.
+// The service also includes methods for sensitive data redaction and
+// scheduling of data scans on Google Cloud Platform based data sets.
+service DlpService {
+ // Find potentially sensitive info in a list of strings.
+ // This method has limits on input size, processing time, and output size.
+ rpc InspectContent(InspectContentRequest) returns (InspectContentResponse) {
+ option (google.api.http) = { post: "/v2beta1/content:inspect" body: "*" };
+ }
+
+ // Redact potentially sensitive info from a list of strings.
+ // This method has limits on input size, processing time, and output size.
+ rpc RedactContent(RedactContentRequest) returns (RedactContentResponse) {
+ option (google.api.http) = { post: "/v2beta1/content:redact" body: "*" };
+ }
+
+ // Schedule a job scanning content in a Google Cloud Platform data repository.
+ rpc CreateInspectOperation(CreateInspectOperationRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2beta1/inspect/operations" body: "*" };
+ }
+
+ // Returns the list of results for a given inspect operation result set id.
+ rpc ListInspectFindings(ListInspectFindingsRequest) returns (ListInspectFindingsResponse) {
+ option (google.api.http) = { get: "/v2beta1/{name=inspect/results/*}/findings" };
+ }
+
+ // Returns sensitive information types for a given category.
+ rpc ListInfoTypes(ListInfoTypesRequest) returns (ListInfoTypesResponse) {
+ option (google.api.http) = { get: "/v2beta1/rootCategories/{category=*}/infoTypes" };
+ }
+
+ // Returns the list of root categories of sensitive information.
+ rpc ListRootCategories(ListRootCategoriesRequest) returns (ListRootCategoriesResponse) {
+ option (google.api.http) = { get: "/v2beta1/rootCategories" };
+ }
+}
+
+// Configuration description of the scanning process.
+// When used with redactContent only info_types and min_likelihood are currently
+// used.
+message InspectConfig {
+ // Restrict what info_types to look for. The values must correspond to
+ // InfoType values returned by ListInfoTypes or found in documentation.
+ // Empty info_types runs all enabled detectors.
+ repeated InfoType info_types = 1;
+
+ // Only return findings equal or above this threshold.
+ Likelihood min_likelihood = 2;
+
+ // Limit the number of findings per content item.
+ int32 max_findings = 3;
+
+ // When true, a contextual quote from the data that triggered a finding will
+ // be included in the response; see Finding.quote.
+ bool include_quote = 4;
+
+ // When true, exclude type information of the findings.
+ bool exclude_types = 6;
+}
+
+// Container structure for the content to inspect.
+message ContentItem {
+ // Type of the content, as defined in Content-Type HTTP header.
+ // Supported types are: all "text" types, octet streams, PNG images,
+ // JPEG images.
+ string type = 1;
+
+ // Data of the item, either as a byte array or a UTF-8 encoded string.
+ oneof data_item {
+ // Content data to inspect or redact.
+ bytes data = 2;
+
+ // String data to inspect or redact.
+ string value = 3;
+ }
+}
+
+// All the findings for a single scanned item.
+message InspectResult {
+ // List of findings for an item.
+ repeated Finding findings = 1;
+
+ // If true, then this item might have more findings than were returned,
+ // and the findings returned are an arbitrary subset of all findings.
+ // The findings list might be truncated because the input items were too
+ // large, or because the server reached the maximum amount of resources
+ // allowed for a single API call. For best results, divide the input into
+ // smaller batches.
+ bool findings_truncated = 2;
+}
+
+// Container structure describing a single finding within a string or image.
+message Finding {
+ // The specific string that is potentially sensitive info.
+ string quote = 1;
+
+ // The specific type of info the string might be.
+ InfoType info_type = 2;
+
+ // Estimate of how likely it is that the info_type is correct.
+ Likelihood likelihood = 3;
+
+ // Location of the info found.
+ Location location = 4;
+
+ // Timestamp when finding was detected.
+ google.protobuf.Timestamp create_time = 6;
+}
+
+// Specifies the location of a finding within its source item.
+message Location {
+ // Zero-based byte offsets within a content item.
+ Range byte_range = 1;
+
+ // Character offsets within a content item, included when the content type
+ // is text. The default charset is assumed to be UTF-8.
+ Range codepoint_range = 2;
+
+ // Location within an image's pixels.
+ repeated ImageLocation image_boxes = 3;
+
+ // Key of the finding.
+ RecordKey record_key = 4;
+
+ // Field id of the field containing the finding.
+ FieldId field_id = 5;
+}
+
+// Generic half-open interval [start, end)
+message Range {
+ // Index of the first character of the range (inclusive).
+ int64 start = 1;
+
+ // Index of the last character of the range (exclusive).
+ int64 end = 2;
+}
+
+// Bounding box encompassing detected text within an image.
+message ImageLocation {
+ // Top coordinate of the bounding box. (0,0) is upper left.
+ int32 top = 1;
+
+ // Left coordinate of the bounding box. (0,0) is upper left.
+ int32 left = 2;
+
+ // Width of the bounding box in pixels.
+ int32 width = 3;
+
+ // Height of the bounding box in pixels.
+ int32 height = 4;
+}
+
+// Request to search for potentially sensitive info in a list of items
+// and replace it with a default or provided content.
+message RedactContentRequest {
+ message ReplaceConfig {
+ // Type of information to replace. Only one ReplaceConfig per info_type
+ // should be provided. If ReplaceConfig does not have an info_type, we'll
+ // match it against all info_types that are found but not specified in
+ // another ReplaceConfig.
+ InfoType info_type = 1;
+
+ // Content replacing sensitive information of given type. Max 256 chars.
+ string replace_with = 2;
+ }
+
+ // Configuration for the inspector.
+ InspectConfig inspect_config = 1;
+
+ // The list of items to inspect. Up to 100 are allowed per request.
+ repeated ContentItem items = 2;
+
+ // The strings to replace findings with. Must specify at least one.
+ repeated ReplaceConfig replace_configs = 3;
+}
+
+// Results of deidentifying a list of items.
+message RedactContentResponse {
+ // The redacted content.
+ repeated ContentItem items = 1;
+}
+
+// Request to search for potentially sensitive info in a list of items.
+message InspectContentRequest {
+ // Configuration for the inspector.
+ InspectConfig inspect_config = 1;
+
+ // The list of items to inspect. Items in a single request are
+ // considered "related" unless inspect_config.independent_inputs is true.
+ // Up to 100 are allowed per request.
+ repeated ContentItem items = 2;
+}
+
+// Results of inspecting a list of items.
+message InspectContentResponse {
+ // Each content_item from the request will have a result in this list, in the
+ // same order as the request.
+ repeated InspectResult results = 1;
+}
+
+// Request for scheduling a scan of a data subset from a Google Cloud Platform data
+// repository.
+message CreateInspectOperationRequest {
+ // Configuration for the inspector.
+ InspectConfig inspect_config = 1;
+
+ // Specification of the data set to process.
+ StorageConfig storage_config = 2;
+
+ // Optional location to store findings. The bucket must already exist and
+ // the Google APIs service account for DLP must have write permission to
+ // write to the given bucket.
+ // Results will be split over multiple CSV files, with each file name matching
+ // the pattern "[operation_id] + [count].csv".
+ // The [operation_id] will match the identifier for the Operation,
+ // and [count] is a counter used for tracking the number of files written.
+ // Regardless of the storage type scanned, the CSV file(s) contain the
+ // following columns: id, info_type, likelihood, byte size of finding, quote,
+ // time_stamp.
+ // For Cloud Storage the additional columns are: file_path, start_offset.
+ // For Cloud Datastore the additional columns are: project_id, namespace_id,
+ // path, column_name, offset.
+ OutputStorageConfig output_config = 3;
+}
+
+// Cloud repository for storing output.
+message OutputStorageConfig {
+ oneof type {
+ // The path to a Google Storage location to store output.
+ CloudStoragePath storage_path = 2;
+ }
+}
+
+// Stats regarding a specific InfoType.
+message InfoTypeStatistics {
+ // The type of finding this stat is for.
+ InfoType info_type = 1;
+
+ // Number of findings for this info type.
+ int64 count = 2;
+}
+
+// Metadata returned within GetOperation for an inspect request.
+message InspectOperationMetadata {
+ // Total size in bytes that were processed.
+ int64 processed_bytes = 1;
+
+ // Estimate of the number of bytes to process.
+ int64 total_estimated_bytes = 4;
+
+ repeated InfoTypeStatistics info_type_stats = 2;
+
+ // The time which this request was started.
+ google.protobuf.Timestamp create_time = 3;
+
+ // The inspect config used to create the Operation.
+ InspectConfig request_inspect_config = 5;
+
+ // The storage config used to create the Operation.
+ StorageConfig request_storage_config = 6;
+
+ // Optional location to store findings.
+ OutputStorageConfig request_output_config = 7;
+}
+
+// The operational data.
+message InspectOperationResult {
+ // The server-assigned name, which is only unique within the same service that
+ // originally returns it. If you use the default HTTP mapping, the
+ // `name` should have the format of `inspect/results/{id}`.
+ string name = 1;
+}
+
+// Request for the list of results in a given inspect operation.
+message ListInspectFindingsRequest {
+ // Identifier of the results set returned as metadata of
+ // the longrunning operation created by a call to CreateInspectOperation.
+ // Should be in the format of `inspect/results/{id}`.
+ string name = 1;
+
+ // Maximum number of results to return.
+ // If 0, the implementation will select a reasonable value.
+ int32 page_size = 2;
+
+ // The value returned by the last `ListInspectFindingsResponse`; indicates
+ // that this is a continuation of a prior `ListInspectFindings` call, and that
+ // the system should return the next page of data.
+ string page_token = 3;
+}
+
+// Response to the ListInspectFindings request.
+message ListInspectFindingsResponse {
+ // The results.
+ InspectResult result = 1;
+
+ // If not empty, indicates that there may be more results that match the
+ // request; this value should be passed in a new `ListInspectFindingsRequest`.
+ string next_page_token = 2;
+}
+
+// Info type description.
+message InfoTypeDescription {
+ // Internal name of the info type.
+ string name = 1;
+
+ // Human readable form of the info type name.
+ string display_name = 2;
+
+ // List of categories this info type belongs to.
+ repeated CategoryDescription categories = 3;
+}
+
+// Request for the list of info types belonging to a given category,
+// or all supported info types if no category is specified.
+message ListInfoTypesRequest {
+ // Category name as returned by ListRootCategories.
+ string category = 1;
+
+ // Optional BCP-47 language code for localized info type friendly
+ // names. If omitted, or if localized strings are not available,
+ // en-US strings will be returned.
+ string language_code = 2;
+}
+
+// Response to the ListInfoTypes request.
+message ListInfoTypesResponse {
+ // Set of sensitive info types belonging to a category.
+ repeated InfoTypeDescription info_types = 1;
+}
+
+// Info Type Category description.
+message CategoryDescription {
+ // Internal name of the category.
+ string name = 1;
+
+ // Human readable form of the category name.
+ string display_name = 2;
+}
+
+// Request for root categories of Info Types supported by the API.
+// Example values might include "FINANCE", "HEALTH", "FAST", "DEFAULT".
+message ListRootCategoriesRequest {
+ // Optional language code for localized friendly category names.
+ // If omitted or if localized strings are not available,
+ // en-US strings will be returned.
+ string language_code = 1;
+}
+
+// Response for ListRootCategories request.
+message ListRootCategoriesResponse {
+ // List of all info type categories supported by the API.
+ repeated CategoryDescription categories = 1;
+}
+
+// Categorization of results based on how likely they are to represent a match,
+// based on the number of elements they contain which imply a match.
+enum Likelihood {
+ // Default value; information with all likelihoods will be included.
+ LIKELIHOOD_UNSPECIFIED = 0;
+
+ // Few matching elements.
+ VERY_UNLIKELY = 1;
+
+ UNLIKELY = 2;
+
+ // Some matching elements.
+ POSSIBLE = 3;
+
+ LIKELY = 4;
+
+ // Many matching elements.
+ VERY_LIKELY = 5;
+}
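
Putting the request messages above together, here is a minimal Java sketch of an `InspectContent` request body, assuming the generated classes live in `com.google.privacy.dlp.v2beta1` per this file's `java_package` option; "EMAIL_ADDRESS" is only an illustrative info type name, since the authoritative list comes from `ListInfoTypes`:

import com.google.privacy.dlp.v2beta1.ContentItem;
import com.google.privacy.dlp.v2beta1.InfoType;
import com.google.privacy.dlp.v2beta1.InspectConfig;
import com.google.privacy.dlp.v2beta1.InspectContentRequest;
import com.google.privacy.dlp.v2beta1.Likelihood;

public class InspectContentSketch {
  // Builds an InspectContentRequest that scans one plain-text item for a
  // single info type.
  static InspectContentRequest emailScanRequest(String text) {
    InspectConfig config = InspectConfig.newBuilder()
        .addInfoTypes(InfoType.newBuilder().setName("EMAIL_ADDRESS"))
        .setMinLikelihood(Likelihood.POSSIBLE)
        .setIncludeQuote(true) // ask for Finding.quote in the response
        .build();
    ContentItem item = ContentItem.newBuilder()
        .setType("text/plain") // Content-Type of the item
        .setValue(text)        // string branch of the data_item oneof
        .build();
    return InspectContentRequest.newBuilder()
        .setInspectConfig(config)
        .addItems(item) // up to 100 items per request
        .build();
  }
}
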
diff --git a/third_party/googleapis/google/privacy/dlp/v2beta1/dlp_gapic.yaml b/third_party/googleapis/google/privacy/dlp/v2beta1/dlp_gapic.yaml
new file mode 100644
index 0000000000..b19acf037c
--- /dev/null
+++ b/third_party/googleapis/google/privacy/dlp/v2beta1/dlp_gapic.yaml
@@ -0,0 +1,122 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.privacy.dlp.spi.v2beta1
+ python:
+ package_name: google.cloud.gapic.privacy.dlp.v2beta1
+ go:
+ package_name: cloud.google.com/go/privacy/dlp/apiv2beta1
+ csharp:
+ package_name: Google.Privacy.Dlp.V2beta1
+ ruby:
+ package_name: Google::Cloud::Privacy::Dlp::V2beta1
+ php:
+ package_name: Google\Cloud\Privacy\Dlp\V2beta1
+ nodejs:
+ package_name: dlp.v2beta1
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.privacy.dlp.v2beta1.DlpService
+ collections:
+ - name_pattern: inspect/results/{result}
+ entity_name: result
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: InspectContent
+ flattening:
+ groups:
+ - parameters:
+ - inspect_config
+ - items
+ required_fields:
+ - inspect_config
+ - items
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: RedactContent
+ flattening:
+ groups:
+ - parameters:
+ - inspect_config
+ - items
+ - replace_configs
+ required_fields:
+ - inspect_config
+ - items
+ - replace_configs
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: CreateInspectOperation
+ flattening:
+ groups:
+ - parameters:
+ - inspect_config
+ - storage_config
+ - output_config
+ required_fields:
+ - inspect_config
+ - storage_config
+ - output_config
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: ListInspectFindings
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: result
+ timeout_millis: 60000
+ - name: ListInfoTypes
+ flattening:
+ groups:
+ - parameters:
+ - category
+ - language_code
+ required_fields:
+ - category
+ - language_code
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ - name: ListRootCategories
+ flattening:
+ groups:
+ - parameters:
+ - language_code
+ required_fields:
+ - language_code
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
diff --git a/third_party/googleapis/google/privacy/dlp/v2beta1/storage.proto b/third_party/googleapis/google/privacy/dlp/v2beta1/storage.proto
new file mode 100644
index 0000000000..792f914b0b
--- /dev/null
+++ b/third_party/googleapis/google/privacy/dlp/v2beta1/storage.proto
@@ -0,0 +1,186 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.privacy.dlp.v2beta1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1;dlp";
+option java_multiple_files = true;
+option java_outer_classname = "DlpStorage";
+option java_package = "com.google.privacy.dlp.v2beta1";
+
+
+// Type of information detected by the API.
+message InfoType {
+ // Name of the information type, provided by the API call ListInfoTypes.
+ string name = 1;
+}
+
+// General identifier of a data field in a storage service.
+message FieldId {
+ // Column name describing the field.
+ string column_name = 1;
+}
+
+// Datastore partition ID.
+// A partition ID identifies a grouping of entities. The grouping is always
+// by project and namespace; however, the namespace ID may be empty.
+//
+// A partition ID contains several dimensions:
+// project ID and namespace ID.
+message PartitionId {
+ // The ID of the project to which the entities belong.
+ string project_id = 2;
+
+ // If not empty, the ID of the namespace to which the entities belong.
+ string namespace_id = 4;
+}
+
+// A representation of a Datastore kind.
+message KindExpression {
+ // The name of the kind.
+ string name = 1;
+}
+
+// A reference to a property relative to the Datastore kind expressions.
+message PropertyReference {
+ // The name of the property.
+ // If name includes "."s, it may be interpreted as a property name path.
+ string name = 2;
+}
+
+// A representation of a Datastore property in a projection.
+message Projection {
+ // The property to project.
+ PropertyReference property = 1;
+}
+
+// Options defining a data set within Google Cloud Datastore.
+message DatastoreOptions {
+ // A partition ID identifies a grouping of entities. The grouping is always
+ // by project and namespace; however, the namespace ID may be empty.
+ PartitionId partition_id = 1;
+
+ // The kind to process.
+ KindExpression kind = 2;
+
+ // Properties to scan. If none are specified, all properties will be scanned
+ // by default.
+ repeated Projection projection = 3;
+}
+
+// Options defining a file or a set of files (path ending with *) within
+// a Google Cloud Storage bucket.
+message CloudStorageOptions {
+ // Set of files to scan.
+ message FileSet {
+ // The URL, in the format gs://<bucket>/<path>. A trailing wildcard in the
+ // path is allowed.
+ string url = 1;
+ }
+
+ FileSet file_set = 1;
+}
+
+// A location in Cloud Storage.
+message CloudStoragePath {
+ // The url, in the format of gs://bucket/<path>.
+ string path = 1;
+}
+
+// Shared message indicating Cloud storage type.
+message StorageConfig {
+ oneof type {
+ // Google Cloud Datastore options specification.
+ DatastoreOptions datastore_options = 2;
+
+ // Google Cloud Storage options specification.
+ CloudStorageOptions cloud_storage_options = 3;
+ }
+}
+
+// Record key for a finding in a Cloud Storage file.
+message CloudStorageKey {
+ // Path to the file.
+ string file_path = 1;
+
+ // Byte offset of the referenced data in the file.
+ int64 start_offset = 2;
+}
+
+// Record key for a finding in Cloud Datastore.
+message DatastoreKey {
+ // Datastore entity key.
+ Key entity_key = 1;
+}
+
+// A unique identifier for a Datastore entity.
+// If a key's partition ID or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+message Key {
+ // A (kind, ID/name) pair used to construct a key path.
+ //
+ // If either name or ID is set, the element is complete.
+ // If neither is set, the element is incomplete.
+ message PathElement {
+ // The kind of the entity.
+ // A kind matching regex `__.*__` is reserved/read-only.
+ // A kind must not contain more than 1500 bytes when UTF-8 encoded.
+ // Cannot be `""`.
+ string kind = 1;
+
+ // The type of ID.
+ oneof id_type {
+ // The auto-allocated ID of the entity.
+ // Never equal to zero. Values less than zero are discouraged and may not
+ // be supported in the future.
+ int64 id = 2;
+
+ // The name of the entity.
+ // A name matching regex `__.*__` is reserved/read-only.
+ // A name must not be more than 1500 bytes when UTF-8 encoded.
+ // Cannot be `""`.
+ string name = 3;
+ }
+ }
+
+ // Entities are partitioned into subsets, currently identified by a project
+ // ID and namespace ID.
+ // Queries are scoped to a single partition.
+ PartitionId partition_id = 1;
+
+ // The entity path.
+ // An entity path consists of one or more elements composed of a kind and a
+ // string or numerical identifier, which identify entities. The first
+ // element identifies a _root entity_, the second element identifies
+ // a _child_ of the root entity, the third element identifies a child of the
+ // second entity, and so forth. The entities identified by all prefixes of
+ // the path are called the element's _ancestors_.
+ //
+ // A path can never be empty, and a path can have at most 100 elements.
+ repeated PathElement path = 2;
+}
+
+// Message for a unique key indicating a record that contains a finding.
+message RecordKey {
+ oneof type {
+ CloudStorageKey cloud_storage_key = 1;
+
+ DatastoreKey datastore_key = 2;
+ }
+}
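
For scheduled scans, `CreateInspectOperationRequest` takes one of the storage configurations defined above. A short sketch selecting the Cloud Storage branch of the `StorageConfig` oneof, again assuming the standard generated classes in `com.google.privacy.dlp.v2beta1`; the bucket name and prefix are placeholders:

import com.google.privacy.dlp.v2beta1.CloudStorageOptions;
import com.google.privacy.dlp.v2beta1.CloudStorageOptions.FileSet;
import com.google.privacy.dlp.v2beta1.StorageConfig;

public class StorageConfigSketch {
  // Points a scan at every object under a Cloud Storage prefix; a trailing
  // wildcard in the path is allowed per the FileSet comment above.
  static StorageConfig bucketScan() {
    return StorageConfig.newBuilder()
        .setCloudStorageOptions(CloudStorageOptions.newBuilder()
            .setFileSet(FileSet.newBuilder().setUrl("gs://my-bucket/reports/*")))
        .build();
  }
}
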
diff --git a/third_party/googleapis/google/pubsub/pubsub.yaml b/third_party/googleapis/google/pubsub/pubsub.yaml
new file mode 100644
index 0000000000..dbe5a49821
--- /dev/null
+++ b/third_party/googleapis/google/pubsub/pubsub.yaml
@@ -0,0 +1,42 @@
+# Google Cloud Pub/Sub API service configuration
+
+type: google.api.Service
+config_version: 3
+name: pubsub.googleapis.com
+
+title: Google Cloud Pub/Sub API
+
+documentation:
+ summary: >
+ Provides reliable, many-to-many, asynchronous messaging between
+ applications.
+ rules:
+ - selector: >
+ google.pubsub.v1.Topic.name,
+ description: |
+ The name of the topic. It must have the format
+ `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter,
+ and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+ underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+ signs (`%`). It must be between 3 and 255 characters in length, and it
+ must not start with `"goog"`.
+ - selector: >
+ google.pubsub.v1.Subscription.name,
+ description: |
+ The name of the subscription. It must have the format
+ `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must
+ start with a letter, and contain only letters (`[A-Za-z]`), numbers
+ (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),
+ plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters
+ in length, and it must not start with `"goog"`.
+
+apis:
+- name: google.pubsub.v1.Publisher
+- name: google.pubsub.v1.Subscriber
+
+authentication:
+ rules:
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/pubsub,
+ https://www.googleapis.com/auth/cloud-platform
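
The naming rules spelled out above for the `{topic}` segment (and, with the same character set, for `{subscription}`) can be checked client-side before issuing a request. A sketch that encodes those constraints as a regular expression; this mirrors only the documented rules and is not the server's authoritative validation:

import java.util.regex.Pattern;

public class TopicNameCheckSketch {
  // Starts with a letter, contains only the listed characters, is 3-255
  // characters long, and does not start with "goog".
  private static final Pattern TOPIC_SEGMENT =
      Pattern.compile("(?!goog)[A-Za-z][A-Za-z0-9._~+%-]{2,254}");

  static boolean isValidTopicSegment(String topic) {
    return TOPIC_SEGMENT.matcher(topic).matches();
  }

  public static void main(String[] args) {
    System.out.println(isValidTopicSegment("my-topic"));   // true
    System.out.println(isValidTopicSegment("goog-topic")); // false
  }
}
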
diff --git a/third_party/googleapis/google/pubsub/v1/pubsub.proto b/third_party/googleapis/google/pubsub/v1/pubsub.proto
new file mode 100644
index 0000000000..a5a5760402
--- /dev/null
+++ b/third_party/googleapis/google/pubsub/v1/pubsub.proto
@@ -0,0 +1,687 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.pubsub.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option csharp_namespace = "Google.Cloud.PubSub.V1";
+option go_package = "google.golang.org/genproto/googleapis/pubsub/v1;pubsub";
+option java_multiple_files = true;
+option java_outer_classname = "PubsubProto";
+option java_package = "com.google.pubsub.v1";
+
+
+// The service that an application uses to manipulate subscriptions and to
+// consume messages from a subscription via the `Pull` method.
+service Subscriber {
+ // Creates a subscription to a given topic.
+ // If the subscription already exists, returns `ALREADY_EXISTS`.
+ // If the corresponding topic doesn't exist, returns `NOT_FOUND`.
+ //
+ // If the name is not provided in the request, the server will assign a random
+ // name for this subscription on the same project as the topic, conforming
+ // to the
+ // [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
+ // The generated name is populated in the returned Subscription object.
+ // Note that for REST API requests, you must specify a name in the request.
+ rpc CreateSubscription(Subscription) returns (Subscription) {
+ option (google.api.http) = { put: "/v1/{name=projects/*/subscriptions/*}" body: "*" };
+ }
+
+ // Gets the configuration details of a subscription.
+ rpc GetSubscription(GetSubscriptionRequest) returns (Subscription) {
+ option (google.api.http) = { get: "/v1/{subscription=projects/*/subscriptions/*}" };
+ }
+
+ // Updates an existing subscription. Note that certain properties of a
+ // subscription, such as its topic, are not modifiable.
+ rpc UpdateSubscription(UpdateSubscriptionRequest) returns (Subscription) {
+ option (google.api.http) = { patch: "/v1/{subscription.name=projects/*/subscriptions/*}" body: "*" };
+ }
+
+ // Lists matching subscriptions.
+ rpc ListSubscriptions(ListSubscriptionsRequest) returns (ListSubscriptionsResponse) {
+ option (google.api.http) = { get: "/v1/{project=projects/*}/subscriptions" };
+ }
+
+ // Deletes an existing subscription. All messages retained in the subscription
+ // are immediately dropped. Calls to `Pull` after deletion will return
+ // `NOT_FOUND`. After a subscription is deleted, a new one may be created with
+ // the same name, but the new one has no association with the old
+ // subscription or its topic unless the same topic is specified.
+ rpc DeleteSubscription(DeleteSubscriptionRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{subscription=projects/*/subscriptions/*}" };
+ }
+
+ // Modifies the ack deadline for a specific message. This method is useful
+ // to indicate that the subscriber needs more time to process a message,
+ // or to make the message available for redelivery if the
+ // processing was interrupted. Note that this does not modify the
+ // subscription-level `ackDeadlineSeconds` used for subsequent messages.
+ rpc ModifyAckDeadline(ModifyAckDeadlineRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline" body: "*" };
+ }
+
+ // Acknowledges the messages associated with the `ack_ids` in the
+ // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages
+ // from the subscription.
+ //
+ // Acknowledging a message whose ack deadline has expired may succeed,
+ // but such a message may be redelivered later. Acknowledging a message more
+ // than once will not result in an error.
+ rpc Acknowledge(AcknowledgeRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{subscription=projects/*/subscriptions/*}:acknowledge" body: "*" };
+ }
+
+ // Pulls messages from the server. Returns an empty list if there are no
+ // messages available in the backlog. The server may return `UNAVAILABLE` if
+ // there are too many concurrent pull requests pending for the given
+ // subscription.
+ rpc Pull(PullRequest) returns (PullResponse) {
+ option (google.api.http) = { post: "/v1/{subscription=projects/*/subscriptions/*}:pull" body: "*" };
+ }
+
+ // (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will
+ // respond with UNIMPLEMENTED errors unless you have been invited to test
+ // this feature. Contact cloud-pubsub@google.com with any questions.
+ //
+ // Establishes a stream with the server, which sends messages down to the
+ // client. The client streams acknowledgements and ack deadline modifications
+ // back to the server. The server will close the stream and return the status
+ // on any error. The server may close the stream with status `OK` to reassign
+ // server-side resources, in which case, the client should re-establish the
+ // stream. `UNAVAILABLE` may also be returned in the case of a transient error
+ // (e.g., a server restart). These should also be retried by the client. Flow
+ // control can be achieved by configuring the underlying RPC channel.
+ rpc StreamingPull(stream StreamingPullRequest) returns (stream StreamingPullResponse);
+
+ // Modifies the `PushConfig` for a specified subscription.
+ //
+ // This may be used to change a push subscription to a pull one (signified by
+ // an empty `PushConfig`) or vice versa, or change the endpoint URL and other
+ // attributes of a push subscription. Messages will accumulate for delivery
+ // continuously through the call regardless of changes to the `PushConfig`.
+ rpc ModifyPushConfig(ModifyPushConfigRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig" body: "*" };
+ }
+
+ // Lists the existing snapshots.
+ rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) {
+ option (google.api.http) = { get: "/v1/{project=projects/*}/snapshots" };
+ }
+
+ // Creates a snapshot from the requested subscription.
+ // If the snapshot already exists, returns `ALREADY_EXISTS`.
+ // If the requested subscription doesn't exist, returns `NOT_FOUND`.
+ //
+ // If the name is not provided in the request, the server will assign a random
+ // name for this snapshot on the same project as the subscription, conforming
+ // to the
+ // [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
+ // The generated name is populated in the returned Snapshot object.
+ // Note that for REST API requests, you must specify a name in the request.
+ rpc CreateSnapshot(CreateSnapshotRequest) returns (Snapshot) {
+ option (google.api.http) = { put: "/v1/{name=projects/*/snapshots/*}" body: "*" };
+ }
+
+ // Removes an existing snapshot. All messages retained in the snapshot
+ // are immediately dropped. After a snapshot is deleted, a new one may be
+ // created with the same name, but the new one has no association with the old
+ // snapshot or its subscription, unless the same subscription is specified.
+ rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{snapshot=projects/*/snapshots/*}" };
+ }
+
+ // Seeks an existing subscription to a point in time or to a given snapshot,
+ // whichever is provided in the request.
+ rpc Seek(SeekRequest) returns (SeekResponse) {
+ option (google.api.http) = { post: "/v1/{subscription=projects/*/subscriptions/*}:seek" body: "*" };
+ }
+}
+
+// The service that an application uses to manipulate topics, and to send
+// messages to a topic.
+service Publisher {
+ // Creates the given topic with the given name.
+ rpc CreateTopic(Topic) returns (Topic) {
+ option (google.api.http) = { put: "/v1/{name=projects/*/topics/*}" body: "*" };
+ }
+
+ // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic
+ // does not exist. The message payload must not be empty; it must contain
+ // either a non-empty data field, or at least one attribute.
+ rpc Publish(PublishRequest) returns (PublishResponse) {
+ option (google.api.http) = { post: "/v1/{topic=projects/*/topics/*}:publish" body: "*" };
+ }
+
+ // Gets the configuration of a topic.
+ rpc GetTopic(GetTopicRequest) returns (Topic) {
+ option (google.api.http) = { get: "/v1/{topic=projects/*/topics/*}" };
+ }
+
+ // Lists matching topics.
+ rpc ListTopics(ListTopicsRequest) returns (ListTopicsResponse) {
+ option (google.api.http) = { get: "/v1/{project=projects/*}/topics" };
+ }
+
+  // Lists the names of the subscriptions for this topic.
+ rpc ListTopicSubscriptions(ListTopicSubscriptionsRequest) returns (ListTopicSubscriptionsResponse) {
+ option (google.api.http) = { get: "/v1/{topic=projects/*/topics/*}/subscriptions" };
+ }
+
+ // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic
+ // does not exist. After a topic is deleted, a new topic may be created with
+ // the same name; this is an entirely new topic with none of the old
+ // configuration or subscriptions. Existing subscriptions to this topic are
+ // not deleted, but their `topic` field is set to `_deleted-topic_`.
+ rpc DeleteTopic(DeleteTopicRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{topic=projects/*/topics/*}" };
+ }
+}
+
+// A topic resource.
+message Topic {
+ // The name of the topic. It must have the format
+ // `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter,
+ // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+ // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+ // signs (`%`). It must be between 3 and 255 characters in length, and it
+ // must not start with `"goog"`.
+ string name = 1;
+}
+
+// A message's data and its attributes. The message payload must not be empty;
+// it must contain either a non-empty data field, or at least one attribute.
+message PubsubMessage {
+ // The message payload.
+ bytes data = 1;
+
+ // Optional attributes for this message.
+ map<string, string> attributes = 2;
+
+ // ID of this message, assigned by the server when the message is published.
+ // Guaranteed to be unique within the topic. This value may be read by a
+ // subscriber that receives a `PubsubMessage` via a `Pull` call or a push
+ // delivery. It must not be populated by the publisher in a `Publish` call.
+ string message_id = 3;
+
+ // The time at which the message was published, populated by the server when
+ // it receives the `Publish` call. It must not be populated by the
+ // publisher in a `Publish` call.
+ google.protobuf.Timestamp publish_time = 4;
+}
+
+// Request for the GetTopic method.
+message GetTopicRequest {
+ // The name of the topic to get.
+ // Format is `projects/{project}/topics/{topic}`.
+ string topic = 1;
+}
+
+// Request for the Publish method.
+message PublishRequest {
+ // The messages in the request will be published on this topic.
+ // Format is `projects/{project}/topics/{topic}`.
+ string topic = 1;
+
+ // The messages to publish.
+ repeated PubsubMessage messages = 2;
+}
+
+// Response for the `Publish` method.
+message PublishResponse {
+ // The server-assigned ID of each published message, in the same order as
+ // the messages in the request. IDs are guaranteed to be unique within
+ // the topic.
+ repeated string message_ids = 1;
+}
+
+// Request for the `ListTopics` method.
+message ListTopicsRequest {
+ // The name of the cloud project that topics belong to.
+ // Format is `projects/{project}`.
+ string project = 1;
+
+ // Maximum number of topics to return.
+ int32 page_size = 2;
+
+ // The value returned by the last `ListTopicsResponse`; indicates that this is
+ // a continuation of a prior `ListTopics` call, and that the system should
+ // return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the `ListTopics` method.
+message ListTopicsResponse {
+ // The resulting topics.
+ repeated Topic topics = 1;
+
+ // If not empty, indicates that there may be more topics that match the
+ // request; this value should be passed in a new `ListTopicsRequest`.
+ string next_page_token = 2;
+}
+
+// Request for the `ListTopicSubscriptions` method.
+message ListTopicSubscriptionsRequest {
+ // The name of the topic that subscriptions are attached to.
+ // Format is `projects/{project}/topics/{topic}`.
+ string topic = 1;
+
+ // Maximum number of subscription names to return.
+ int32 page_size = 2;
+
+ // The value returned by the last `ListTopicSubscriptionsResponse`; indicates
+ // that this is a continuation of a prior `ListTopicSubscriptions` call, and
+ // that the system should return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the `ListTopicSubscriptions` method.
+message ListTopicSubscriptionsResponse {
+ // The names of the subscriptions that match the request.
+ repeated string subscriptions = 1;
+
+ // If not empty, indicates that there may be more subscriptions that match
+ // the request; this value should be passed in a new
+ // `ListTopicSubscriptionsRequest` to get more subscriptions.
+ string next_page_token = 2;
+}
+
+// Request for the `DeleteTopic` method.
+message DeleteTopicRequest {
+ // Name of the topic to delete.
+ // Format is `projects/{project}/topics/{topic}`.
+ string topic = 1;
+}
+
+// A subscription resource.
+message Subscription {
+ // The name of the subscription. It must have the format
+ // `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must
+ // start with a letter, and contain only letters (`[A-Za-z]`), numbers
+ // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),
+ // plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters
+ // in length, and it must not start with `"goog"`.
+ string name = 1;
+
+ // The name of the topic from which this subscription is receiving messages.
+ // Format is `projects/{project}/topics/{topic}`.
+ // The value of this field will be `_deleted-topic_` if the topic has been
+ // deleted.
+ string topic = 2;
+
+ // If push delivery is used with this subscription, this field is
+ // used to configure it. An empty `pushConfig` signifies that the subscriber
+ // will pull and ack messages using API methods.
+ PushConfig push_config = 4;
+
+ // This value is the maximum time after a subscriber receives a message
+ // before the subscriber should acknowledge the message. After message
+ // delivery but before the ack deadline expires and before the message is
+ // acknowledged, it is an outstanding message and will not be delivered
+ // again during that time (on a best-effort basis).
+ //
+ // For pull subscriptions, this value is used as the initial value for the ack
+ // deadline. To override this value for a given message, call
+ // `ModifyAckDeadline` with the corresponding `ack_id` if using
+ // pull.
+ // The minimum custom deadline you can specify is 10 seconds.
+ // The maximum custom deadline you can specify is 600 seconds (10 minutes).
+ // If this parameter is 0, a default value of 10 seconds is used.
+ //
+ // For push delivery, this value is also used to set the request timeout for
+ // the call to the push endpoint.
+ //
+ // If the subscriber never acknowledges the message, the Pub/Sub
+ // system will eventually redeliver the message.
+ int32 ack_deadline_seconds = 5;
+
+ // Indicates whether to retain acknowledged messages. If true, then
+ // messages are not expunged from the subscription's backlog, even if they are
+ // acknowledged, until they fall out of the `message_retention_duration`
+ // window.
+ bool retain_acked_messages = 7;
+
+ // How long to retain unacknowledged messages in the subscription's backlog,
+ // from the moment a message is published.
+ // If `retain_acked_messages` is true, then this also configures the retention
+ // of acknowledged messages, and thus configures how far back in time a `Seek`
+ // can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10
+ // minutes.
+ google.protobuf.Duration message_retention_duration = 8;
+}
+
+// Configuration for a push delivery endpoint.
+message PushConfig {
+ // A URL locating the endpoint to which messages should be pushed.
+ // For example, a Webhook endpoint might use "https://example.com/push".
+ string push_endpoint = 1;
+
+ // Endpoint configuration attributes.
+ //
+ // Every endpoint has a set of API supported attributes that can be used to
+ // control different aspects of the message delivery.
+ //
+ // The currently supported attribute is `x-goog-version`, which you can
+ // use to change the format of the pushed message. This attribute
+ // indicates the version of the data expected by the endpoint. This
+ // controls the shape of the pushed message (i.e., its fields and metadata).
+ // The endpoint version is based on the version of the Pub/Sub API.
+ //
+ // If not present during the `CreateSubscription` call, it will default to
+  // the version of the API used to make such a call. If not present during a
+ // `ModifyPushConfig` call, its value will not be changed. `GetSubscription`
+ // calls will always return a valid version, even if the subscription was
+ // created without this attribute.
+ //
+ // The possible values for this attribute are:
+ //
+ // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.
+ // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.
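+  //
+  // For example, a subscription that should receive v1-format pushes would
+  // carry an attribute like the following (illustrative value only):
+  //
+  //     attributes { key: "x-goog-version" value: "v1" }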
+ map<string, string> attributes = 2;
+}
+
+// A message and its corresponding acknowledgment ID.
+message ReceivedMessage {
+ // This ID can be used to acknowledge the received message.
+ string ack_id = 1;
+
+ // The message.
+ PubsubMessage message = 2;
+}
+
+// Request for the GetSubscription method.
+message GetSubscriptionRequest {
+ // The name of the subscription to get.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 1;
+}
+
+// Request for the UpdateSubscription method.
+message UpdateSubscriptionRequest {
+ // The updated subscription object.
+ Subscription subscription = 1;
+
+ // Indicates which fields in the provided subscription to update.
+ // Must be specified and non-empty.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request for the `ListSubscriptions` method.
+message ListSubscriptionsRequest {
+ // The name of the cloud project that subscriptions belong to.
+ // Format is `projects/{project}`.
+ string project = 1;
+
+ // Maximum number of subscriptions to return.
+ int32 page_size = 2;
+
+ // The value returned by the last `ListSubscriptionsResponse`; indicates that
+ // this is a continuation of a prior `ListSubscriptions` call, and that the
+ // system should return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the `ListSubscriptions` method.
+message ListSubscriptionsResponse {
+ // The subscriptions that match the request.
+ repeated Subscription subscriptions = 1;
+
+ // If not empty, indicates that there may be more subscriptions that match
+ // the request; this value should be passed in a new
+ // `ListSubscriptionsRequest` to get more subscriptions.
+ string next_page_token = 2;
+}
+
+// Request for the DeleteSubscription method.
+message DeleteSubscriptionRequest {
+ // The subscription to delete.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 1;
+}
+
+// Request for the ModifyPushConfig method.
+message ModifyPushConfigRequest {
+ // The name of the subscription.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 1;
+
+ // The push configuration for future deliveries.
+ //
+ // An empty `pushConfig` indicates that the Pub/Sub system should
+ // stop pushing messages from the given subscription and allow
+ // messages to be pulled and acknowledged - effectively pausing
+ // the subscription if `Pull` is not called.
+ PushConfig push_config = 2;
+}
+
+// Request for the `Pull` method.
+message PullRequest {
+ // The subscription from which messages should be pulled.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 1;
+
+  // If this field is set to true, the system will respond immediately even if
+  // there are no messages available to return in the `Pull` response.
+ // Otherwise, the system may wait (for a bounded amount of time) until at
+ // least one message is available, rather than returning no messages. The
+ // client may cancel the request if it does not wish to wait any longer for
+ // the response.
+ bool return_immediately = 2;
+
+ // The maximum number of messages returned for this request. The Pub/Sub
+ // system may return fewer than the number specified.
+ int32 max_messages = 3;
+}
+
+// Response for the `Pull` method.
+message PullResponse {
+ // Received Pub/Sub messages. The Pub/Sub system will return zero messages if
+ // there are no more available in the backlog. The Pub/Sub system may return
+ // fewer than the `maxMessages` requested even if there are more messages
+ // available in the backlog.
+ repeated ReceivedMessage received_messages = 1;
+}
+
+// Request for the ModifyAckDeadline method.
+message ModifyAckDeadlineRequest {
+ // The name of the subscription.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 1;
+
+ // List of acknowledgment IDs.
+ repeated string ack_ids = 4;
+
+ // The new ack deadline with respect to the time this request was sent to
+ // the Pub/Sub system. For example, if the value is 10, the new
+ // ack deadline will expire 10 seconds after the `ModifyAckDeadline` call
+ // was made. Specifying zero may immediately make the message available for
+ // another pull request.
+ // The minimum deadline you can specify is 0 seconds.
+ // The maximum deadline you can specify is 600 seconds (10 minutes).
+ int32 ack_deadline_seconds = 3;
+}
+
+// Request for the Acknowledge method.
+message AcknowledgeRequest {
+ // The subscription whose message is being acknowledged.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 1;
+
+  // The acknowledgment IDs for the messages being acknowledged, as returned
+  // by the Pub/Sub system in the `Pull` response. Must not be empty.
+ repeated string ack_ids = 2;
+}
+
+// Request for the `StreamingPull` streaming RPC method. This request is used to
+// establish the initial stream as well as to stream acknowledgements and ack
+// deadline modifications from the client to the server.
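+//
+// As an illustration (the values below are hypothetical, not part of the
+// API): the first request on a stream might carry only
+// `subscription: "projects/p/subscriptions/s"` and
+// `stream_ack_deadline_seconds: 60`, while a later request on the same
+// stream might carry `ack_ids` for messages that have been processed and
+// matching `modify_deadline_ack_ids`/`modify_deadline_seconds` entries for
+// messages that need more time.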
+message StreamingPullRequest {
+ // The subscription for which to initialize the new stream. This must be
+ // provided in the first request on the stream, and must not be set in
+ // subsequent requests from client to server.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 1;
+
+ // List of acknowledgement IDs for acknowledging previously received messages
+ // (received on this stream or a different stream). If an ack ID has expired,
+ // the corresponding message may be redelivered later. Acknowledging a message
+ // more than once will not result in an error. If the acknowledgement ID is
+ // malformed, the stream will be aborted with status `INVALID_ARGUMENT`.
+ repeated string ack_ids = 2;
+
+ // The list of new ack deadlines for the IDs listed in
+ // `modify_deadline_ack_ids`. The size of this list must be the same as the
+  // size of `modify_deadline_ack_ids`. If it differs, the stream will be aborted
+ // with `INVALID_ARGUMENT`. Each element in this list is applied to the
+ // element in the same position in `modify_deadline_ack_ids`. The new ack
+ // deadline is with respect to the time this request was sent to the Pub/Sub
+ // system. Must be >= 0. For example, if the value is 10, the new ack deadline
+ // will expire 10 seconds after this request is received. If the value is 0,
+ // the message is immediately made available for another streaming or
+ // non-streaming pull request. If the value is < 0 (an error), the stream will
+ // be aborted with status `INVALID_ARGUMENT`.
+ repeated int32 modify_deadline_seconds = 3;
+
+ // List of acknowledgement IDs whose deadline will be modified based on the
+ // corresponding element in `modify_deadline_seconds`. This field can be used
+ // to indicate that more time is needed to process a message by the
+ // subscriber, or to make the message available for redelivery if the
+ // processing was interrupted.
+ repeated string modify_deadline_ack_ids = 4;
+
+ // The ack deadline to use for the stream. This must be provided in the
+ // first request on the stream, but it can also be updated on subsequent
+ // requests from client to server. The minimum deadline you can specify is 10
+ // seconds. The maximum deadline you can specify is 600 seconds (10 minutes).
+ int32 stream_ack_deadline_seconds = 5;
+}
+
+// Response for the `StreamingPull` method. This response is used to stream
+// messages from the server to the client.
+message StreamingPullResponse {
+ // Received Pub/Sub messages. This will not be empty.
+ repeated ReceivedMessage received_messages = 1;
+}
+
+// Request for the `CreateSnapshot` method.
+message CreateSnapshotRequest {
+ // Optional user-provided name for this snapshot.
+ // If the name is not provided in the request, the server will assign a random
+ // name for this snapshot on the same project as the subscription.
+ // Note that for REST API requests, you must specify a name.
+ // Format is `projects/{project}/snapshots/{snap}`.
+ string name = 1;
+
+ // The subscription whose backlog the snapshot retains.
+ // Specifically, the created snapshot is guaranteed to retain:
+ // (a) The existing backlog on the subscription. More precisely, this is
+ // defined as the messages in the subscription's backlog that are
+ // unacknowledged upon the successful completion of the
+ // `CreateSnapshot` request; as well as:
+ // (b) Any messages published to the subscription's topic following the
+ // successful completion of the CreateSnapshot request.
+ // Format is `projects/{project}/subscriptions/{sub}`.
+ string subscription = 2;
+}
+
+// A snapshot resource.
+message Snapshot {
+ // The name of the snapshot.
+ string name = 1;
+
+ // The name of the topic from which this snapshot is retaining messages.
+ string topic = 2;
+
+ // The snapshot is guaranteed to exist up until this time.
+ // A newly-created snapshot expires no later than 7 days from the time of its
+ // creation. Its exact lifetime is determined at creation by the existing
+ // backlog in the source subscription. Specifically, the lifetime of the
+ // snapshot is `7 days - (age of oldest unacked message in the subscription)`.
+ // For example, consider a subscription whose oldest unacked message is 3 days
+ // old. If a snapshot is created from this subscription, the snapshot -- which
+ // will always capture this 3-day-old backlog as long as the snapshot
+ // exists -- will expire in 4 days.
+ google.protobuf.Timestamp expire_time = 3;
+}
+
+// Request for the `ListSnapshots` method.
+message ListSnapshotsRequest {
+ // The name of the cloud project that snapshots belong to.
+ // Format is `projects/{project}`.
+ string project = 1;
+
+ // Maximum number of snapshots to return.
+ int32 page_size = 2;
+
+ // The value returned by the last `ListSnapshotsResponse`; indicates that this
+ // is a continuation of a prior `ListSnapshots` call, and that the system
+ // should return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the `ListSnapshots` method.
+message ListSnapshotsResponse {
+ // The resulting snapshots.
+ repeated Snapshot snapshots = 1;
+
+  // If not empty, indicates that there may be more snapshots that match the
+ // request; this value should be passed in a new `ListSnapshotsRequest`.
+ string next_page_token = 2;
+}
+
+// Request for the `DeleteSnapshot` method.
+message DeleteSnapshotRequest {
+ // The name of the snapshot to delete.
+ // Format is `projects/{project}/snapshots/{snap}`.
+ string snapshot = 1;
+}
+
+// Request for the `Seek` method.
+message SeekRequest {
+ // The subscription to affect.
+ string subscription = 1;
+
+ oneof target {
+ // The time to seek to.
+ // Messages retained in the subscription that were published before this
+ // time are marked as acknowledged, and messages retained in the
+ // subscription that were published after this time are marked as
+ // unacknowledged. Note that this operation affects only those messages
+ // retained in the subscription (configured by the combination of
+ // `message_retention_duration` and `retain_acked_messages`). For example,
+ // if `time` corresponds to a point before the message retention
+ // window (or to a point before the system's notion of the subscription
+ // creation time), only retained messages will be marked as unacknowledged,
+ // and already-expunged messages will not be restored.
+ google.protobuf.Timestamp time = 2;
+
+ // The snapshot to seek to. The snapshot's topic must be the same as that of
+ // the provided subscription.
+ // Format is `projects/{project}/snapshots/{snap}`.
+ string snapshot = 3;
+ }
+}
+
+message SeekResponse {
+
+}
diff --git a/third_party/googleapis/google/pubsub/v1/pubsub_gapic.yaml b/third_party/googleapis/google/pubsub/v1/pubsub_gapic.yaml
new file mode 100644
index 0000000000..09cd93427b
--- /dev/null
+++ b/third_party/googleapis/google/pubsub/v1/pubsub_gapic.yaml
@@ -0,0 +1,680 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.pubsub.spi.v1
+ interface_names:
+ google.pubsub.v1.Publisher: TopicAdmin
+ google.pubsub.v1.Subscriber: SubscriptionAdmin
+ python:
+ package_name: google.cloud.gapic.pubsub.v1
+ go:
+ package_name: cloud.google.com/go/pubsub/apiv1
+ domain_layer_location: cloud.google.com/go/pubsub
+ csharp:
+ package_name: Google.Cloud.PubSub.V1
+ ruby:
+ package_name: Google::Cloud::Pubsub::V1
+ php:
+ package_name: Google\Cloud\PubSub\V1
+ nodejs:
+ package_name: pubsub.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+fixed_resource_name_values:
+- entity_name: deleted_topic
+ fixed_value: _deleted-topic_
+collection_oneofs:
+- oneof_name: topic_oneof
+ collection_names:
+ - topic
+ - deleted_topic
+interfaces:
+- name: google.pubsub.v1.Subscriber
+ lang_doc:
+ java: To retrieve messages from a subscription, see the Subscriber class.
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/snapshots/{snapshot}
+ entity_name: snapshot
+ - name_pattern: projects/{project}/subscriptions/{subscription}
+ entity_name: subscription
+ - name_pattern: projects/{project}/topics/{topic}
+ entity_name: topic
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - name: pull
+ retry_codes:
+ - DEADLINE_EXCEEDED
+ - INTERNAL
+ - CANCELLED
+ - RESOURCE_EXHAUSTED
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000 # 60 seconds
+ initial_rpc_timeout_millis: 60000 # 60 seconds
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000 # 60 seconds
+ total_timeout_millis: 600000 # 10 minutes
+ - name: messaging
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000 # 60 seconds
+ initial_rpc_timeout_millis: 12000 # 12 seconds
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 12000 # 12 seconds
+ total_timeout_millis: 600000 # 10 minutes
+ experimental_features:
+ iam_resources:
+ - type: google.pubsub.v1.Subscription
+ field: name
+ - type: google.pubsub.v1.Topic
+ field: name
+ methods:
+ - name: CreateSubscription
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - topic
+ - push_config
+ - ack_deadline_seconds
+ required_fields:
+ - name
+ - topic
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: subscription
+ topic: topic
+ timeout_millis: 60000
+ - name: GetSubscription
+ flattening:
+ groups:
+ - parameters:
+ - subscription
+ required_fields:
+ - subscription
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ - name: UpdateSubscription
+ required_fields:
+ - subscription
+ - update_mask
+ request_object_method: true
+ retry_codes_name: idempotent
+ retry_params_name: default
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - java
+ visibility: PACKAGE
+ - name: ListSubscriptions
+ flattening:
+ groups:
+ - parameters:
+ - project
+ required_fields:
+ - project
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: subscriptions
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ project: project
+ timeout_millis: 60000
+ - name: DeleteSubscription
+ flattening:
+ groups:
+ - parameters:
+ - subscription
+ required_fields:
+ - subscription
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ - name: ModifyAckDeadline
+ flattening:
+ groups:
+ - parameters:
+ - subscription
+ - ack_ids
+ - ack_deadline_seconds
+ required_fields:
+ - subscription
+ - ack_ids
+ - ack_deadline_seconds
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - java
+ visibility: PACKAGE
+ - name: Acknowledge
+ flattening:
+ groups:
+ - parameters:
+ - subscription
+ - ack_ids
+ required_fields:
+ - subscription
+ - ack_ids
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: messaging
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - java
+ visibility: PACKAGE
+ - name: Pull
+ flattening:
+ groups:
+ - parameters:
+ - subscription
+ - return_immediately
+ - max_messages
+ required_fields:
+ - subscription
+ - max_messages
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: pull
+ retry_params_name: messaging
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - java
+ visibility: PACKAGE
+ - name: StreamingPull
+ required_fields:
+ - subscription
+ - stream_ack_deadline_seconds
+ grpc_streaming:
+ response:
+ resources_field: received_messages
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: pull
+ retry_params_name: messaging
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - java
+ visibility: PACKAGE
+ - name: ModifyPushConfig
+ flattening:
+ groups:
+ - parameters:
+ - subscription
+ - push_config
+ required_fields:
+ - subscription
+ - push_config
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ - name: ListSnapshots
+ flattening:
+ groups:
+ - parameters:
+ - project
+ required_fields:
+ - project
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: snapshots
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ project: project
+ timeout_millis: 60000
+ - name: CreateSnapshot
+ flattening:
+ groups:
+ - parameters:
+ - name
+ - subscription
+ required_fields:
+ - name
+ - subscription
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: snapshot
+ subscription: subscription
+ timeout_millis: 60000
+ - name: DeleteSnapshot
+ flattening:
+ groups:
+ - parameters:
+ - snapshot
+ required_fields:
+ - snapshot
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ snapshot: snapshot
+ timeout_millis: 60000
+ - name: Seek
+ # Not including flattening until oneof support.
+ # https://github.com/googleapis/toolkit/issues/1057
+ required_fields:
+ - subscription
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ subscription: subscription
+ timeout_millis: 60000
+ - name: SetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - policy
+ required_fields:
+ - resource
+ - policy
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: subscription
+ timeout_millis: 60000
+ reroute_to_grpc_interface: google.iam.v1.IAMPolicy
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: DISABLED
+ - name: GetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ required_fields:
+ - resource
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: subscription
+ timeout_millis: 60000
+ reroute_to_grpc_interface: google.iam.v1.IAMPolicy
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: DISABLED
+ - name: TestIamPermissions
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - permissions
+ required_fields:
+ - resource
+ - permissions
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: subscription
+ timeout_millis: 60000
+ reroute_to_grpc_interface: google.iam.v1.IAMPolicy
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: DISABLED
+- name: google.pubsub.v1.Publisher
+ lang_doc:
+ java: To publish messages to a topic, see the Publisher class.
+ smoke_test:
+ method: CreateTopic
+ init_fields:
+ - name%project=$PROJECT_ID
+ - name%topic="smoketesttopic-$RANDOM"
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/topics/{topic}
+ entity_name: topic
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: one_plus_delivery
+ retry_codes:
+ - ABORTED
+ - CANCELLED
+ - INTERNAL
+ - RESOURCE_EXHAUSTED
+ - UNKNOWN
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000 # 60 seconds
+ initial_rpc_timeout_millis: 60000 # 60 seconds
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000 # 60 seconds
+ total_timeout_millis: 600000 # 10 minutes
+ - name: messaging
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000 # 60 seconds
+ initial_rpc_timeout_millis: 12000 # 12 seconds
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 12000 # 12 seconds
+ total_timeout_millis: 600000 # 10 minutes
+ experimental_features:
+ iam_resources:
+ - type: google.pubsub.v1.Subscription
+ field: name
+ - type: google.pubsub.v1.Topic
+ field: name
+ methods:
+ - name: CreateTopic
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: topic
+ timeout_millis: 60000
+ - name: Publish
+ flattening:
+ groups:
+ - parameters:
+ - topic
+ - messages
+ required_fields:
+ - topic
+ - messages
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: one_plus_delivery
+ retry_params_name: messaging
+ batching:
+ thresholds:
+ element_count_threshold: 10
+ element_count_limit: 1000 # TO BE REMOVED LATER
+        request_byte_threshold: 1024 # 1 KiB
+ request_byte_limit: 10485760 # TO BE REMOVED LATER
+ delay_threshold_millis: 10
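+        # A rough reading of the thresholds above, assuming standard GAPIC
+        # batching semantics (the first threshold reached flushes the pending
+        # batch): a Publish batch is sent once it holds 10 messages, reaches
+        # about 1 KiB of request data, or 10 ms have elapsed since the first
+        # message was buffered, whichever comes first.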
+ batch_descriptor:
+ batched_field: messages
+ discriminator_fields:
+ - topic
+ subresponse_field: message_ids
+ field_name_patterns:
+ topic: topic
+ sample_code_init_fields:
+ - messages[0].data
+ timeout_millis: 60000
+ surface_treatments:
+ - include_languages:
+ - java
+ visibility: PACKAGE
+ - name: GetTopic
+ flattening:
+ groups:
+ - parameters:
+ - topic
+ required_fields:
+ - topic
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ topic: topic
+ timeout_millis: 60000
+ - name: ListTopics
+ flattening:
+ groups:
+ - parameters:
+ - project
+ required_fields:
+ - project
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: topics
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ project: project
+ timeout_millis: 60000
+ - name: ListTopicSubscriptions
+ flattening:
+ groups:
+ - parameters:
+ - topic
+ required_fields:
+ - topic
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: subscriptions
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ topic: topic
+ timeout_millis: 60000
+ - name: DeleteTopic
+ flattening:
+ groups:
+ - parameters:
+ - topic
+ required_fields:
+ - topic
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ topic: topic
+ timeout_millis: 60000
+ - name: SetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - policy
+ required_fields:
+ - resource
+ - policy
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: topic
+ timeout_millis: 60000
+ reroute_to_grpc_interface: google.iam.v1.IAMPolicy
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: DISABLED
+ - name: GetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ required_fields:
+ - resource
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: topic
+ timeout_millis: 60000
+ reroute_to_grpc_interface: google.iam.v1.IAMPolicy
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: DISABLED
+ - name: TestIamPermissions
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - permissions
+ required_fields:
+ - resource
+ - permissions
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: topic
+ timeout_millis: 60000
+ reroute_to_grpc_interface: google.iam.v1.IAMPolicy
+ surface_treatments:
+ - include_languages:
+ - go
+ visibility: DISABLED
+resource_name_generation:
+- message_name: Topic
+ field_entity_map:
+ name: topic
+- message_name: GetTopicRequest
+ field_entity_map:
+ topic: topic
+- message_name: PublishRequest
+ field_entity_map:
+ topic: topic
+- message_name: ListTopicsRequest
+ field_entity_map:
+ project: project
+- message_name: ListTopicSubscriptionsRequest
+ field_entity_map:
+ topic: topic
+- message_name: ListTopicSubscriptionsResponse
+ field_entity_map:
+ subscriptions: subscription
+- message_name: DeleteTopicRequest
+ field_entity_map:
+ topic: topic
+- message_name: Subscription
+ field_entity_map:
+ name: subscription
+ topic: topic_oneof
+- message_name: GetSubscriptionRequest
+ field_entity_map:
+ subscription: subscription
+- message_name: ListSubscriptionsRequest
+ field_entity_map:
+ project: project
+- message_name: DeleteSubscriptionRequest
+ field_entity_map:
+ subscription: subscription
+- message_name: ModifyPushConfigRequest
+ field_entity_map:
+ subscription: subscription
+- message_name: PullRequest
+ field_entity_map:
+ subscription: subscription
+- message_name: ModifyAckDeadlineRequest
+ field_entity_map:
+ subscription: subscription
+- message_name: AcknowledgeRequest
+ field_entity_map:
+ subscription: subscription
+- message_name: StreamingPullRequest
+ field_entity_map:
+ subscription: subscription
+- message_name: Snapshot
+ field_entity_map:
+ name: snapshot
+ topic: topic
+- message_name: CreateSnapshotRequest
+ field_entity_map:
+ name: snapshot
+ subscription: subscription
+- message_name: ListSnapshotsRequest
+ field_entity_map:
+ project: project
+- message_name: DeleteSnapshotRequest
+ field_entity_map:
+ snapshot: snapshot
+- message_name: SeekRequest
+ field_entity_map:
+ subscription: subscription
+ snapshot: snapshot
diff --git a/third_party/googleapis/google/pubsub/v1beta2/README.md b/third_party/googleapis/google/pubsub/v1beta2/README.md
new file mode 100644
index 0000000000..120a818280
--- /dev/null
+++ b/third_party/googleapis/google/pubsub/v1beta2/README.md
@@ -0,0 +1,156 @@
+## Overview
+This file describes an API for a Pub/Sub (Publish/Subscribe) system. This system
+provides a reliable many-to-many communication mechanism between independently
+written publishers and subscribers where the publisher publishes messages to
+*topics* and each subscriber creates a *subscription* and consumes *messages*
+from it.
+
+1. The Pub/Sub system maintains bindings between topics and subscriptions.
+2. A publisher publishes messages into a topic.
+3. The Pub/Sub system delivers messages from topics into attached
+ subscriptions.
+4. A subscriber receives pending messages from its subscription and
+ acknowledges each one to the Pub/Sub system.
+5. The Pub/Sub system removes acknowledged messages from that subscription.
+
+## Data Model
+The data model consists of the following:
+
+* **Topic**: A topic is a resource to which messages are published by
+ publishers. Topics are named, and the name of the topic is unique within the
+ Pub/Sub system.
+
+* **Subscription**: A subscription records the subscriber's interest in a
+ topic. The Pub/Sub system maintains those messages which still need
+ to be delivered and acknowledged so that they can retried as needed.
+ The set of messages that have not been acknowledged is called the
+ subscription backlog.
+
+* **Message**: A message is a unit of data that flows in the system. It
+ contains opaque data from the publisher along with its *attributes*.
+
+* **Message Attributes** (optional): A set of opaque key-value pairs assigned
+ by the publisher to a message. Attributes are delivered unmodified to
+ subscribers together with the message data, if there's any.
+
+## Publisher Flow
+A publisher publishes messages to the topic using the `Publish` call:
+
+```data
+PubsubMessage message;
+message.set_data("....");
+message.attributes.put("key1", "value1");
+PublishRequest request;
+request.set_topic("topicName");
+request.add_message(message);
+Publisher.Publish(request);
+```
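+
+For readers calling the generated gRPC stubs directly, the same flow looks
+roughly like the sketch below. It assumes Python stubs generated from
+`pubsub.proto` with `protoc` (the `pubsub_pb2`/`pubsub_pb2_grpc` module names
+follow the standard protoc conventions) and an already-authenticated
+`grpc.Channel`; the channel setup and the topic name are placeholders, not
+part of this API description.
+
+```python
+# Minimal publish sketch against stubs generated from this proto.
+# Channel creation and authentication are assumed to happen elsewhere.
+from google.pubsub.v1beta2 import pubsub_pb2, pubsub_pb2_grpc
+
+
+def publish_one(channel, topic_name):
+    stub = pubsub_pb2_grpc.PublisherStub(channel)
+    message = pubsub_pb2.PubsubMessage(
+        data=b"....",
+        attributes={"key1": "value1"},
+    )
+    response = stub.Publish(
+        pubsub_pb2.PublishRequest(topic=topic_name, messages=[message])
+    )
+    # Server-assigned ID of the published message.
+    return response.message_ids[0]
+```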
+
+## Subscriber Flow
+The subscriber part of the API is richer than the publisher part and has a
+number of concepts for subscription creation and use:
+
+1. A subscriber (user or process) creates a subscription using the
+ `CreateSubscription` call.
+
+2. A subscriber receives messages in one of two ways: via pull or push.
+
+ * To receive messages via pull, a subscriber calls the `Pull` method on the
+ `Subscriber` to get messages from the subscription. For each individual
+ message, the subscriber may use the `ack_id` received in the
+ `PullResponse` to `Acknowledge` the message, or modify the *ack deadline*
+ with `ModifyAckDeadline`. See the `Subscription.ack_deadline_seconds`
+ field documentation for details on the ack deadline behavior. Messages
+ must be acknowledged or they will be redelivered in a future `Pull` call.
+
+ **Note:** Messages may be consumed in parallel by multiple processes
+ making `Pull` calls to the same subscription; this will result in the set
+ of messages from the subscription being split among the processes, each
+ process receiving a subset of the messages.
+
+ * To receive messages via push, the `PushConfig` field must be specified in
+ the `Subscription` parameter when creating a subscription, or set with
+ `ModifyPushConfig`. The PushConfig specifies an endpoint at which the
+ subscriber exposes the `PushEndpointService` or some other handler,
+ depending on the endpoint. Messages are received via the
+ `ProcessPushMessage` method. The push subscriber responds to the method
+    with a result code that indicates one of two things: `Acknowledge` (the
+    message has been successfully processed and the Pub/Sub system may delete
+    it) or `Nack` (the message has been rejected and the Pub/Sub system should
+ resend it at a later time).
+
+ **Note:** The endpoint may be a load balancer for better scalability, so
+ that multiple processes may handle the message processing load.
+
+Subscription creation:
+
+```data
+Subscription subscription;
+subscription.set_topic("topicName");
+subscription.set_name("subscriptionName");
+subscription.push_config().set_push_endpoint("machinename:8888");
+Subscriber.CreateSubscription(subscription);
+```
+
+Consuming messages via push:
+
+```data
+// The port 'machinename:8888' must be bound to a stubby server that
+// implements the PushEndpointService with the following method.
+// (This example assumes the push endpoint has a single subscription
+// called "subName", though in general a single push endpoint might
+// have multiple subscriptions.)
+
+int ProcessPushMessage(
+ ProcessPushMessageRequest request,
+ ProcessPushMessageResponse *response) {
+ if (request.subscription().equals("subscriptionName")) {
+ Process(request.message().data());
+ }
+ *response = ProcessPushMessageResponse.default();
+ return OK; // This return code implies an acknowledgment
+}
+```
+
+Consuming messages via pull:
+
+```data
+// The subscription must be created without setting the push_config field.
+
+PullRequest pull_request;
+pull_request.set_subscription("subscriptionName");
+pull_request.set_return_immediately(false);
+pull_request.set_max_messages(10);
+while (true) {
+ PullResponse pull_response;
+ AcknowledgeRequest ack_request;
+  ack_request.set_subscription("subscriptionName");
+  if (Subscriber.Pull(pull_request, pull_response) == OK) {
+    for (ReceivedMessage received in pull_response.received_messages()) {
+      Process(received.message().data());
+      ack_request.add_ack_id(received.ack_id());
+    }
+  }
+  if (ack_request.ack_ids().size() > 0) {
+ Subscriber.Acknowledge(ack_request);
+ }
+}
+```
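+
+Under the same assumptions as the publish sketch above (generated Python
+stubs and an authenticated channel), a more concrete version of this loop
+might look as follows; `process` stands in for application-specific message
+handling:
+
+```python
+# Pull/acknowledge loop against stubs generated from this proto.
+from google.pubsub.v1beta2 import pubsub_pb2, pubsub_pb2_grpc
+
+
+def pull_loop(channel, subscription_name, process):
+    stub = pubsub_pb2_grpc.SubscriberStub(channel)
+    while True:
+        response = stub.Pull(pubsub_pb2.PullRequest(
+            subscription=subscription_name,
+            return_immediately=False,
+            max_messages=10,
+        ))
+        ack_ids = []
+        for received in response.received_messages:
+            process(received.message.data)
+            ack_ids.append(received.ack_id)
+        if ack_ids:
+            stub.Acknowledge(pubsub_pb2.AcknowledgeRequest(
+                subscription=subscription_name,
+                ack_ids=ack_ids,
+            ))
+```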
+
+## Reliability Semantics
+When a subscriber successfully creates a subscription using
+`Subscriber.CreateSubscription`, it establishes a "subscription point" for
+that subscription, no later than the time that `Subscriber.CreateSubscription`
+returns. The subscriber is guaranteed to receive any message published after
+this subscription point. Note that messages published before the subscription
+point may or may not be delivered.
+
+Messages are not delivered in any particular order by the Pub/Sub system.
+Furthermore, the system guarantees *at-least-once* delivery of each message
+until acknowledged.
+
+## Deletion
+Both topics and subscriptions may be deleted.
+
+When a subscription is deleted, all messages are immediately dropped. If it
+is a pull subscription, future pull requests will return NOT_FOUND.
diff --git a/third_party/googleapis/google/pubsub/v1beta2/pubsub.proto b/third_party/googleapis/google/pubsub/v1beta2/pubsub.proto
new file mode 100644
index 0000000000..70dd1556a2
--- /dev/null
+++ b/third_party/googleapis/google/pubsub/v1beta2/pubsub.proto
@@ -0,0 +1,384 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.pubsub.v1beta2;
+
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/pubsub/v1beta2;pubsub";
+option java_multiple_files = true;
+option java_outer_classname = "PubsubProto";
+option java_package = "com.google.pubsub.v1beta2";
+
+
+// The service that an application uses to manipulate subscriptions and to
+// consume messages from a subscription via the Pull method.
+service Subscriber {
+ // Creates a subscription to a given topic for a given subscriber.
+ // If the subscription already exists, returns ALREADY_EXISTS.
+ // If the corresponding topic doesn't exist, returns NOT_FOUND.
+ //
+ // If the name is not provided in the request, the server will assign a random
+ // name for this subscription on the same project as the topic.
+ rpc CreateSubscription(Subscription) returns (Subscription);
+
+ // Gets the configuration details of a subscription.
+ rpc GetSubscription(GetSubscriptionRequest) returns (Subscription);
+
+ // Lists matching subscriptions.
+ rpc ListSubscriptions(ListSubscriptionsRequest) returns (ListSubscriptionsResponse);
+
+ // Deletes an existing subscription. All pending messages in the subscription
+ // are immediately dropped. Calls to Pull after deletion will return
+ // NOT_FOUND. After a subscription is deleted, a new one may be created with
+ // the same name, but the new one has no association with the old
+ // subscription, or its topic unless the same topic is specified.
+ rpc DeleteSubscription(DeleteSubscriptionRequest) returns (google.protobuf.Empty);
+
+ // Modifies the ack deadline for a specific message. This method is useful to
+ // indicate that more time is needed to process a message by the subscriber,
+ // or to make the message available for redelivery if the processing was
+ // interrupted.
+ rpc ModifyAckDeadline(ModifyAckDeadlineRequest) returns (google.protobuf.Empty);
+
+ // Acknowledges the messages associated with the ack tokens in the
+ // AcknowledgeRequest. The Pub/Sub system can remove the relevant messages
+ // from the subscription.
+ //
+ // Acknowledging a message whose ack deadline has expired may succeed,
+ // but such a message may be redelivered later. Acknowledging a message more
+ // than once will not result in an error.
+ rpc Acknowledge(AcknowledgeRequest) returns (google.protobuf.Empty);
+
+ // Pulls messages from the server. Returns an empty list if there are no
+ // messages available in the backlog. The server may return UNAVAILABLE if
+ // there are too many concurrent pull requests pending for the given
+ // subscription.
+ rpc Pull(PullRequest) returns (PullResponse);
+
+ // Modifies the PushConfig for a specified subscription.
+ //
+ // This may be used to change a push subscription to a pull one (signified
+ // by an empty PushConfig) or vice versa, or change the endpoint URL and other
+ // attributes of a push subscription. Messages will accumulate for
+ // delivery continuously through the call regardless of changes to the
+ // PushConfig.
+ rpc ModifyPushConfig(ModifyPushConfigRequest) returns (google.protobuf.Empty);
+}
+
+// The service that an application uses to manipulate topics, and to send
+// messages to a topic.
+service Publisher {
+ // Creates the given topic with the given name.
+ rpc CreateTopic(Topic) returns (Topic);
+
+ // Adds one or more messages to the topic. Returns NOT_FOUND if the topic does
+ // not exist.
+ rpc Publish(PublishRequest) returns (PublishResponse);
+
+ // Gets the configuration of a topic.
+ rpc GetTopic(GetTopicRequest) returns (Topic);
+
+ // Lists matching topics.
+ rpc ListTopics(ListTopicsRequest) returns (ListTopicsResponse);
+
+  // Lists the names of the subscriptions for this topic.
+ rpc ListTopicSubscriptions(ListTopicSubscriptionsRequest) returns (ListTopicSubscriptionsResponse);
+
+ // Deletes the topic with the given name. Returns NOT_FOUND if the topic does
+ // not exist. After a topic is deleted, a new topic may be created with the
+ // same name; this is an entirely new topic with none of the old
+ // configuration or subscriptions. Existing subscriptions to this topic are
+ // not deleted.
+ rpc DeleteTopic(DeleteTopicRequest) returns (google.protobuf.Empty);
+}
+
+// A topic resource.
+message Topic {
+ // Name of the topic.
+ string name = 1;
+}
+
+// A message's data and its attributes.
+message PubsubMessage {
+ // The message payload. For JSON requests, the value of this field must be
+ // base64-encoded.
+ bytes data = 1;
+
+ // Optional attributes for this message.
+ map<string, string> attributes = 2;
+
+ // ID of this message assigned by the server at publication time. Guaranteed
+ // to be unique within the topic. This value may be read by a subscriber
+ // that receives a PubsubMessage via a Pull call or a push delivery. It must
+ // not be populated by a publisher in a Publish call.
+ string message_id = 3;
+}
+
+// Request for the GetTopic method.
+message GetTopicRequest {
+ // The name of the topic to get.
+ string topic = 1;
+}
+
+// Request for the Publish method.
+message PublishRequest {
+ // The messages in the request will be published on this topic.
+ string topic = 1;
+
+ // The messages to publish.
+ repeated PubsubMessage messages = 2;
+}
+
+// Response for the Publish method.
+message PublishResponse {
+ // The server-assigned ID of each published message, in the same order as
+ // the messages in the request. IDs are guaranteed to be unique within
+ // the topic.
+ repeated string message_ids = 1;
+}
+
+// Request for the ListTopics method.
+message ListTopicsRequest {
+ // The name of the cloud project that topics belong to.
+ string project = 1;
+
+ // Maximum number of topics to return.
+ int32 page_size = 2;
+
+ // The value returned by the last ListTopicsResponse; indicates that this is
+ // a continuation of a prior ListTopics call, and that the system should
+ // return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the ListTopics method.
+message ListTopicsResponse {
+ // The resulting topics.
+ repeated Topic topics = 1;
+
+ // If not empty, indicates that there may be more topics that match the
+ // request; this value should be passed in a new ListTopicsRequest.
+ string next_page_token = 2;
+}
+
+// Request for the ListTopicSubscriptions method.
+message ListTopicSubscriptionsRequest {
+ // The name of the topic that subscriptions are attached to.
+ string topic = 1;
+
+ // Maximum number of subscription names to return.
+ int32 page_size = 2;
+
+ // The value returned by the last ListTopicSubscriptionsResponse; indicates
+ // that this is a continuation of a prior ListTopicSubscriptions call, and
+ // that the system should return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the ListTopicSubscriptions method.
+message ListTopicSubscriptionsResponse {
+ // The names of the subscriptions that match the request.
+ repeated string subscriptions = 1;
+
+ // If not empty, indicates that there may be more subscriptions that match
+ // the request; this value should be passed in a new
+ // ListTopicSubscriptionsRequest to get more subscriptions.
+ string next_page_token = 2;
+}
+
+// Request for the DeleteTopic method.
+message DeleteTopicRequest {
+ // Name of the topic to delete.
+ string topic = 1;
+}
+
+// A subscription resource.
+message Subscription {
+ // Name of the subscription.
+ string name = 1;
+
+ // The name of the topic from which this subscription is receiving messages.
+ // This will be present if and only if the subscription has not been detached
+ // from its topic.
+ string topic = 2;
+
+ // If push delivery is used with this subscription, this field is
+ // used to configure it. An empty pushConfig signifies that the subscriber
+ // will pull and ack messages using API methods.
+ PushConfig push_config = 4;
+
+ // This value is the maximum time after a subscriber receives a message
+ // before the subscriber should acknowledge the message. After message
+ // delivery but before the ack deadline expires and before the message is
+ // acknowledged, it is an outstanding message and will not be delivered
+ // again during that time (on a best-effort basis).
+ //
+ // For pull delivery this value
+ // is used as the initial value for the ack deadline. It may be overridden
+ // for a specific message by calling ModifyAckDeadline.
+ //
+ // For push delivery, this value is also used to set the request timeout for
+ // the call to the push endpoint.
+ //
+ // If the subscriber never acknowledges the message, the Pub/Sub
+ // system will eventually redeliver the message.
+ int32 ack_deadline_seconds = 5;
+}
+
+// Configuration for a push delivery endpoint.
+message PushConfig {
+ // A URL locating the endpoint to which messages should be pushed.
+ // For example, a Webhook endpoint might use "https://example.com/push".
+ string push_endpoint = 1;
+
+ // Endpoint configuration attributes.
+ //
+ // Every endpoint has a set of API supported attributes that can be used to
+ // control different aspects of the message delivery.
+ //
+ // The currently supported attribute is `x-goog-version`, which you can
+ // use to change the format of the push message. This attribute
+ // indicates the version of the data expected by the endpoint. This
+ // controls the shape of the envelope (i.e. its fields and metadata).
+ // The endpoint version is based on the version of the Pub/Sub
+ // API.
+ //
+ // If not present during the CreateSubscription call, it will default to
+  // the version of the API used to make such a call. If not present during a
+ // ModifyPushConfig call, its value will not be changed. GetSubscription
+ // calls will always return a valid version, even if the subscription was
+ // created without this attribute.
+ //
+ // The possible values for this attribute are:
+ //
+ // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.
+ // * `v1beta2`: uses the push format defined in the v1beta2 Pub/Sub API.
+ //
+ map<string, string> attributes = 2;
+}
+
+// A message and its corresponding acknowledgment ID.
+message ReceivedMessage {
+ // This ID can be used to acknowledge the received message.
+ string ack_id = 1;
+
+ // The message.
+ PubsubMessage message = 2;
+}
+
+// Request for the GetSubscription method.
+message GetSubscriptionRequest {
+ // The name of the subscription to get.
+ string subscription = 1;
+}
+
+// Request for the ListSubscriptions method.
+message ListSubscriptionsRequest {
+ // The name of the cloud project that subscriptions belong to.
+ string project = 1;
+
+ // Maximum number of subscriptions to return.
+ int32 page_size = 2;
+
+ // The value returned by the last ListSubscriptionsResponse; indicates that
+ // this is a continuation of a prior ListSubscriptions call, and that the
+ // system should return the next page of data.
+ string page_token = 3;
+}
+
+// Response for the ListSubscriptions method.
+message ListSubscriptionsResponse {
+ // The subscriptions that match the request.
+ repeated Subscription subscriptions = 1;
+
+ // If not empty, indicates that there may be more subscriptions that match
+ // the request; this value should be passed in a new ListSubscriptionsRequest
+ // to get more subscriptions.
+ string next_page_token = 2;
+}
+
+// Request for the DeleteSubscription method.
+message DeleteSubscriptionRequest {
+ // The subscription to delete.
+ string subscription = 1;
+}
+
+// Request for the ModifyPushConfig method.
+message ModifyPushConfigRequest {
+ // The name of the subscription.
+ string subscription = 1;
+
+ // The push configuration for future deliveries.
+ //
+ // An empty pushConfig indicates that the Pub/Sub system should
+ // stop pushing messages from the given subscription and allow
+ // messages to be pulled and acknowledged - effectively pausing
+ // the subscription if Pull is not called.
+ PushConfig push_config = 2;
+}
+
+// Request for the Pull method.
+message PullRequest {
+ // The subscription from which messages should be pulled.
+ string subscription = 1;
+
+  // If this is specified as true, the system will respond immediately even if
+ // it is not able to return a message in the Pull response. Otherwise the
+ // system is allowed to wait until at least one message is available rather
+ // than returning no messages. The client may cancel the request if it does
+ // not wish to wait any longer for the response.
+ bool return_immediately = 2;
+
+ // The maximum number of messages returned for this request. The Pub/Sub
+ // system may return fewer than the number specified.
+ int32 max_messages = 3;
+}
+
+// Response for the Pull method.
+message PullResponse {
+ // Received Pub/Sub messages. The Pub/Sub system will return zero messages if
+ // there are no more available in the backlog. The Pub/Sub system may return
+ // fewer than the maxMessages requested even if there are more messages
+ // available in the backlog.
+ repeated ReceivedMessage received_messages = 1;
+}
+
+// Request for the ModifyAckDeadline method.
+message ModifyAckDeadlineRequest {
+ // The name of the subscription.
+ string subscription = 1;
+
+ // The acknowledgment ID.
+ string ack_id = 2;
+
+ // The new ack deadline with respect to the time this request was sent to the
+ // Pub/Sub system. Must be >= 0. For example, if the value is 10, the new ack
+ // deadline will expire 10 seconds after the ModifyAckDeadline call was made.
+ // Specifying zero may immediately make the message available for another pull
+ // request.
+ int32 ack_deadline_seconds = 3;
+}
+
+// Request for the Acknowledge method.
+message AcknowledgeRequest {
+ // The subscription whose message is being acknowledged.
+ string subscription = 1;
+
+  // The acknowledgment IDs for the messages being acknowledged, as returned
+  // by the Pub/Sub system in the Pull response. Must not be empty.
+ repeated string ack_ids = 2;
+}
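
As a rough illustration of how the pull-based messages above fit together, here is a minimal Python sketch that builds a PullRequest, reads the ack IDs out of the PullResponse, and sends an AcknowledgeRequest. The messages are shown as plain dicts in their JSON form; the send() callback and the subscription name are hypothetical stand-ins for whatever gRPC or REST transport a real client would use.

def pull_and_ack(send, subscription, max_messages=10):
    # `send(method_name, request_dict) -> response_dict` is a hypothetical
    # transport hook; `subscription` is a placeholder resource name such as
    # "projects/my-project/subscriptions/my-sub".
    pull_request = {
        "subscription": subscription,
        "return_immediately": False,  # wait until at least one message is available
        "max_messages": max_messages,  # the system may return fewer than this
    }
    pull_response = send("Pull", pull_request)

    ack_ids = []
    for received in pull_response.get("received_messages", []):
        # Each ReceivedMessage pairs an ack_id with the PubsubMessage itself.
        ack_ids.append(received["ack_id"])

    if ack_ids:
        # AcknowledgeRequest carries the ack IDs returned in the Pull response;
        # the list must not be empty.
        send("Acknowledge", {"subscription": subscription, "ack_ids": ack_ids})
    return pull_response
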
diff --git a/third_party/googleapis/google/rpc/README.md b/third_party/googleapis/google/rpc/README.md
new file mode 100644
index 0000000000..3a612228e2
--- /dev/null
+++ b/third_party/googleapis/google/rpc/README.md
@@ -0,0 +1,5 @@
+# Google RPC
+
+This package contains type definitions for general RPC systems. While
+[gRPC](https://github.com/grpc) uses these definitions, they
+are not designed specifically to support gRPC.
\ No newline at end of file
diff --git a/third_party/googleapis/google/rpc/code.proto b/third_party/googleapis/google/rpc/code.proto
new file mode 100644
index 0000000000..9a450956a2
--- /dev/null
+++ b/third_party/googleapis/google/rpc/code.proto
@@ -0,0 +1,180 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/code;code";
+option java_multiple_files = true;
+option java_outer_classname = "CodeProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The canonical error codes for Google APIs.
+//
+//
+// Sometimes multiple error codes may apply. Services should return
+// the most specific error code that applies. For example, prefer
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
+// Similarly, prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
+enum Code {
+ // Not an error; returned on success
+ //
+ // HTTP Mapping: 200 OK
+ OK = 0;
+
+ // The operation was cancelled, typically by the caller.
+ //
+ // HTTP Mapping: 499 Client Closed Request
+ CANCELLED = 1;
+
+ // Unknown error. For example, this error may be returned when
+ // a `Status` value received from another address space belongs to
+ // an error space that is not known in this address space. Also
+ // errors raised by APIs that do not return enough error information
+ // may be converted to this error.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ UNKNOWN = 2;
+
+ // The client specified an invalid argument. Note that this differs
+ // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
+ // that are problematic regardless of the state of the system
+ // (e.g., a malformed file name).
+ //
+ // HTTP Mapping: 400 Bad Request
+ INVALID_ARGUMENT = 3;
+
+ // The deadline expired before the operation could complete. For operations
+ // that change the state of the system, this error may be returned
+ // even if the operation has completed successfully. For example, a
+ // successful response from a server could have been delayed long
+ // enough for the deadline to expire.
+ //
+ // HTTP Mapping: 504 Gateway Timeout
+ DEADLINE_EXCEEDED = 4;
+
+ // Some requested entity (e.g., file or directory) was not found.
+ // For privacy reasons, this code *may* be returned when the client
+ // does not have the access rights to the entity, though such usage is
+ // discouraged.
+ //
+ // HTTP Mapping: 404 Not Found
+ NOT_FOUND = 5;
+
+ // The entity that a client attempted to create (e.g., file or directory)
+ // already exists.
+ //
+ // HTTP Mapping: 409 Conflict
+ ALREADY_EXISTS = 6;
+
+ // The caller does not have permission to execute the specified
+ // operation. `PERMISSION_DENIED` must not be used for rejections
+ // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
+ // instead for those errors). `PERMISSION_DENIED` must not be
+ // used if the caller can not be identified (use `UNAUTHENTICATED`
+ // instead for those errors).
+ //
+ // HTTP Mapping: 403 Forbidden
+ PERMISSION_DENIED = 7;
+
+ // The request does not have valid authentication credentials for the
+ // operation.
+ //
+ // HTTP Mapping: 401 Unauthorized
+ UNAUTHENTICATED = 16;
+
+ // Some resource has been exhausted, perhaps a per-user quota, or
+ // perhaps the entire file system is out of space.
+ //
+ // HTTP Mapping: 429 Too Many Requests
+ RESOURCE_EXHAUSTED = 8;
+
+ // The operation was rejected because the system is not in a state
+ // required for the operation's execution. For example, the directory
+ // to be deleted is non-empty, an rmdir operation is applied to
+ // a non-directory, etc.
+ //
+ // Service implementors can use the following guidelines to decide
+ // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
+ // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
+ // (b) Use `ABORTED` if the client should retry at a higher level
+ // (e.g., restarting a read-modify-write sequence).
+ // (c) Use `FAILED_PRECONDITION` if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, `FAILED_PRECONDITION`
+ // should be returned since the client should not retry unless
+ // the files are deleted from the directory.
+ //
+ // HTTP Mapping: 400 Bad Request
+ FAILED_PRECONDITION = 9;
+
+ // The operation was aborted, typically due to a concurrency issue such as
+ // a sequencer check failure or transaction abort.
+ //
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+ // `ABORTED`, and `UNAVAILABLE`.
+ //
+ // HTTP Mapping: 409 Conflict
+ ABORTED = 10;
+
+ // The operation was attempted past the valid range. E.g., seeking or
+ // reading past end-of-file.
+ //
+ // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
+ // be fixed if the system state changes. For example, a 32-bit file
+ // system will generate `INVALID_ARGUMENT` if asked to read at an
+ // offset that is not in the range [0,2^32-1], but it will generate
+ // `OUT_OF_RANGE` if asked to read from an offset past the current
+ // file size.
+ //
+ // There is a fair bit of overlap between `FAILED_PRECONDITION` and
+ // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
+ // error) when it applies so that callers who are iterating through
+ // a space can easily look for an `OUT_OF_RANGE` error to detect when
+ // they are done.
+ //
+ // HTTP Mapping: 400 Bad Request
+ OUT_OF_RANGE = 11;
+
+ // The operation is not implemented or is not supported/enabled in this
+ // service.
+ //
+ // HTTP Mapping: 501 Not Implemented
+ UNIMPLEMENTED = 12;
+
+ // Internal errors. This means that some invariants expected by the
+ // underlying system have been broken. This error code is reserved
+ // for serious errors.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ INTERNAL = 13;
+
+ // The service is currently unavailable. This is most likely a
+ // transient condition, which can be corrected by retrying with
+ // a backoff.
+ //
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+ // `ABORTED`, and `UNAVAILABLE`.
+ //
+ // HTTP Mapping: 503 Service Unavailable
+ UNAVAILABLE = 14;
+
+ // Unrecoverable data loss or corruption.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ DATA_LOSS = 15;
+}
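
For quick reference, the HTTP mappings called out in the comments above can be collected into a single lookup table. The sketch below is just that table in Python; the fallback to 500 for unrecognized codes is an assumption of this example, not something the enum specifies.

# google.rpc.Code value -> HTTP status, as documented in the enum comments.
CODE_TO_HTTP = {
    0: 200,   # OK
    1: 499,   # CANCELLED
    2: 500,   # UNKNOWN
    3: 400,   # INVALID_ARGUMENT
    4: 504,   # DEADLINE_EXCEEDED
    5: 404,   # NOT_FOUND
    6: 409,   # ALREADY_EXISTS
    7: 403,   # PERMISSION_DENIED
    8: 429,   # RESOURCE_EXHAUSTED
    9: 400,   # FAILED_PRECONDITION
    10: 409,  # ABORTED
    11: 400,  # OUT_OF_RANGE
    12: 501,  # UNIMPLEMENTED
    13: 500,  # INTERNAL
    14: 503,  # UNAVAILABLE
    15: 500,  # DATA_LOSS
    16: 401,  # UNAUTHENTICATED
}

def http_status(code):
    # Falling back to 500 mirrors UNKNOWN's mapping; this fallback is a
    # choice of the sketch, not part of the proto.
    return CODE_TO_HTTP.get(code, 500)
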
diff --git a/third_party/googleapis/google/rpc/error_details.proto b/third_party/googleapis/google/rpc/error_details.proto
new file mode 100644
index 0000000000..4732d421d9
--- /dev/null
+++ b/third_party/googleapis/google/rpc/error_details.proto
@@ -0,0 +1,171 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/duration.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/errdetails;errdetails";
+option java_multiple_files = true;
+option java_outer_classname = "ErrorDetailsProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// Describes when clients can retry a failed request. Clients could ignore
+// the recommendation here or retry when this information is missing from error
+// responses.
+//
+// It's always recommended that clients use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying. If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries has been reached or a maximum retry delay cap has been
+// reached.
+message RetryInfo {
+ // Clients should wait at least this long between retrying the same request.
+ google.protobuf.Duration retry_delay = 1;
+}
+
+// Describes additional debugging info.
+message DebugInfo {
+ // The stack trace entries indicating where the error occurred.
+ repeated string stack_entries = 1;
+
+ // Additional debugging information provided by the server.
+ string detail = 2;
+}
+
+// Describes how a quota check failed.
+//
+// For example if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded. If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryDetail and Help types for other details about handling a
+// quota failure.
+message QuotaFailure {
+ // A message type used to describe a single quota violation. For example, a
+ // daily quota or a custom quota that was exceeded.
+ message Violation {
+ // The subject on which the quota check failed.
+ // For example, "clientip:<ip address of client>" or "project:<Google
+ // developer project id>".
+ string subject = 1;
+
+ // A description of how the quota check failed. Clients can use this
+ // description to find more about the quota configuration in the service's
+ // public documentation, or find the relevant quota limit to adjust through
+  // the developer console.
+ //
+ // For example: "Service disabled" or "Daily Limit for read operations
+ // exceeded".
+ string description = 2;
+ }
+
+ // Describes all quota violations.
+ repeated Violation violations = 1;
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+message BadRequest {
+ // A message type used to describe a single bad request field.
+ message FieldViolation {
+ // A path leading to a field in the request body. The value will be a
+ // sequence of dot-separated identifiers that identify a protocol buffer
+ // field. E.g., "field_violations.field" would identify this field.
+ string field = 1;
+
+ // A description of why the request element is bad.
+ string description = 2;
+ }
+
+ // Describes all violations in a client request.
+ repeated FieldViolation field_violations = 1;
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+message RequestInfo {
+ // An opaque string that should only be interpreted by the service generating
+ // it. For example, it can be used to identify requests in the service's logs.
+ string request_id = 1;
+
+ // Any data that was used to serve this request. For example, an encrypted
+ // stack trace that can be sent back to the service provider for debugging.
+ string serving_data = 2;
+}
+
+// Describes the resource that is being accessed.
+message ResourceInfo {
+ // A name for the type of resource being accessed, e.g. "sql table",
+ // "cloud storage bucket", "file", "Google calendar"; or the type URL
+ // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+ string resource_type = 1;
+
+ // The name of the resource being accessed. For example, a shared calendar
+ // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+ // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+ string resource_name = 2;
+
+ // The owner of the resource (optional).
+ // For example, "user:<owner email>" or "project:<Google developer project
+ // id>".
+ string owner = 3;
+
+ // Describes what error is encountered when accessing this resource.
+ // For example, updating a cloud project may require the `writer` permission
+ // on the developer console project.
+ string description = 4;
+}
+
+// Provides links to documentation or for performing an out of band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+message Help {
+ // Describes a URL link.
+ message Link {
+ // Describes what the link offers.
+ string description = 1;
+
+ // The URL of the link.
+ string url = 2;
+ }
+
+ // URL(s) pointing to additional information on handling the current error.
+ repeated Link links = 1;
+}
+
+// Provides a localized error message that is safe to return to the user
+// which can be attached to an RPC error.
+message LocalizedMessage {
+ // The locale used following the specification defined at
+ // http://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+ // Examples are: "en-US", "fr-CH", "es-MX"
+ string locale = 1;
+
+ // The localized error message in the above locale.
+ string message = 2;
+}
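
The RetryInfo comment above only states the policy in words: wait at least retry_delay, then back off exponentially. The sketch below is one way a client might expand that into concrete wait times; the multiplier, cap, attempt count, and jitter are illustrative choices, not values taken from the proto.

import random

def backoff_delays(retry_delay_seconds, multiplier=2.0, max_delay=32.0, attempts=5):
    # Start from RetryInfo.retry_delay and grow the delay exponentially,
    # adding a small random jitter on each attempt.
    delays, delay = [], retry_delay_seconds
    for _ in range(attempts):
        delays.append(delay + random.uniform(0, delay * 0.1))
        delay = min(delay * multiplier, max_delay)
    return delays

print(backoff_delays(1.0))  # roughly [1.0, 2.1, 4.2, 8.3, 16.4], varying with jitter
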
diff --git a/third_party/googleapis/google/rpc/status.proto b/third_party/googleapis/google/rpc/status.proto
new file mode 100644
index 0000000000..bc6097b29f
--- /dev/null
+++ b/third_party/googleapis/google/rpc/status.proto
@@ -0,0 +1,92 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/any.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
+option java_multiple_files = true;
+option java_outer_classname = "StatusProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` which can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+// it may embed the `Status` in the normal response to indicate the partial
+// errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+// have a `Status` message for error reporting purpose.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+// `Status` message should be used directly inside batch response, one for
+// each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+// results in its response, the status of those operations should be
+// represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+// be used directly after any stripping needed for security/privacy reasons.
+message Status {
+ // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+ int32 code = 1;
+
+ // A developer-facing error message, which should be in English. Any
+ // user-facing error message should be localized and sent in the
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+ string message = 2;
+
+ // A list of messages that carry the error details. There will be a
+ // common set of message types for APIs to use.
+ repeated google.protobuf.Any details = 3;
+}
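
To make the pieces concrete, here is one way a Status could look in its JSON form, combining an INVALID_ARGUMENT code with a BadRequest detail from error_details.proto. The field values are invented for illustration; only the structure and the type URL convention follow the definitions above.

# A google.rpc.Status in JSON form (illustrative values only).
status = {
    "code": 3,  # google.rpc.Code.INVALID_ARGUMENT
    "message": "Request field create_statement is malformed.",
    "details": [
        {
            # google.protobuf.Any carries its payload type as a type URL.
            "@type": "type.googleapis.com/google.rpc.BadRequest",
            "fieldViolations": [
                {
                    "field": "create_statement",
                    "description": "Expected a CREATE DATABASE statement.",
                }
            ],
        }
    ],
}
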
diff --git a/third_party/googleapis/google/spanner/admin/database/spanner_admin_database.yaml b/third_party/googleapis/google/spanner/admin/database/spanner_admin_database.yaml
new file mode 100644
index 0000000000..2f0860a669
--- /dev/null
+++ b/third_party/googleapis/google/spanner/admin/database/spanner_admin_database.yaml
@@ -0,0 +1,41 @@
+type: google.api.Service
+config_version: 3
+name: spanner.googleapis.com
+title: Cloud Spanner Database Admin API
+
+apis:
+ - name: google.spanner.admin.database.v1.DatabaseAdmin
+ mixins:
+ - name: google.iam.v1.IAMPolicy
+
+types:
+ - name: google.spanner.admin.database.v1.CreateDatabaseMetadata
+ - name: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata
+
+authentication:
+ rules:
+ - selector: google.spanner.admin.database.v1.DatabaseAdmin.*,
+ google.iam.v1.IAMPolicy.*,
+ google.longrunning.Operations.*
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/spanner.admin,
+ https://www.googleapis.com/auth/cloud-platform
+
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v1/{name=projects/*/instances/*/databases/*/operations/*}'
+ additional_bindings:
+ - get: '/v1/{name=projects/*/instances/*/operations/*}'
+ - selector: google.longrunning.Operations.ListOperations
+ get: '/v1/{name=projects/*/instances/*/databases/*/operations}'
+ additional_bindings:
+ - get: '/v1/{name=projects/*/instances/*/operations}'
+ - selector: google.longrunning.Operations.CancelOperation
+ post: '/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel'
+ additional_bindings:
+ - post: '/v1/{name=projects/*/instances/*/operations/*}:cancel'
+ - selector: google.longrunning.Operations.DeleteOperation
+ delete: '/v1/{name=projects/*/instances/*/databases/*/operations/*}'
+ additional_bindings:
+ - delete: '/v1/{name=projects/*/instances/*/operations/*}'
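
The http rules above bind the long-running Operations methods to REST paths for both database-level and instance-level operation names. As a sketch of how the two GetOperation bindings resolve, assuming the usual rule that * matches exactly one path segment and taking the host from the name field of this config, a client-side check might look like this (all resource IDs are placeholders):

import re

# The two GetOperation bindings, written as regexes over the `name` variable.
GET_OPERATION_BINDINGS = [
    r"projects/[^/]+/instances/[^/]+/databases/[^/]+/operations/[^/]+",
    r"projects/[^/]+/instances/[^/]+/operations/[^/]+",
]

def get_operation_url(name):
    for pattern in GET_OPERATION_BINDINGS:
        if re.fullmatch(pattern, name):
            return "https://spanner.googleapis.com/v1/" + name
    raise ValueError("operation name matches no GetOperation binding")

print(get_operation_url(
    "projects/my-project/instances/my-instance/databases/my-db/operations/op1"))
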
diff --git a/third_party/googleapis/google/spanner/admin/database/v1/spanner_admin_database_gapic.yaml b/third_party/googleapis/google/spanner/admin/database/v1/spanner_admin_database_gapic.yaml
new file mode 100644
index 0000000000..19b90e1b79
--- /dev/null
+++ b/third_party/googleapis/google/spanner/admin/database/v1/spanner_admin_database_gapic.yaml
@@ -0,0 +1,214 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.spanner.admin.database.spi.v1
+ python:
+ package_name: google.cloud.gapic.spanner_admin_database.v1
+ go:
+ package_name: cloud.google.com/go/spanner/admin/database/apiv1
+ csharp:
+ package_name: Google.Cloud.Spanner.Admin.Database.V1
+ ruby:
+ package_name: Google::Cloud::Spanner::Admin::Database::V1
+ php:
+ package_name: Google\Cloud\Spanner\Admin\Database\V1
+ nodejs:
+ package_name: spanner-admin-database.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.spanner.admin.database.v1.DatabaseAdmin
+ collections:
+ - name_pattern: projects/{project}/instances/{instance}
+ entity_name: instance
+ - name_pattern: projects/{project}/instances/{instance}/databases/{database}
+ entity_name: database
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 1000
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 32000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: ListDatabases
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: databases
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: instance
+ timeout_millis: 30000
+ - name: CreateDatabase
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - create_statement
+ required_fields:
+ - parent
+ - create_statement
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: instance
+ timeout_millis: 30000
+ long_running:
+ return_type: google.spanner.admin.database.v1.Database
+ metadata_type: google.spanner.admin.database.v1.CreateDatabaseMetadata
+ - name: GetDatabase
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: database
+ timeout_millis: 30000
+ - name: UpdateDatabaseDdl
+ flattening:
+ groups:
+ - parameters:
+ - database
+ - statements
+ required_fields:
+ - database
+ - statements
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ database: database
+ timeout_millis: 30000
+ long_running:
+ return_type: google.protobuf.Empty
+ metadata_type: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata
+ - name: DropDatabase
+ flattening:
+ groups:
+ - parameters:
+ - database
+ required_fields:
+ - database
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ database: database
+ timeout_millis: 30000
+ - name: GetDatabaseDdl
+ flattening:
+ groups:
+ - parameters:
+ - database
+ required_fields:
+ - database
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ database: database
+ timeout_millis: 30000
+ - name: SetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - policy
+ required_fields:
+ - resource
+ - policy
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: database
+ timeout_millis: 30000
+ - name: GetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ required_fields:
+ - resource
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: database
+ timeout_millis: 30000
+ - name: TestIamPermissions
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - permissions
+ required_fields:
+ - resource
+ - permissions
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: database
+ timeout_millis: 30000
+resource_name_generation:
+- message_name: ListDatabasesRequest
+ field_entity_map:
+ parent: instance
+- message_name: CreateDatabaseRequest
+ field_entity_map:
+ parent: instance
+- message_name: CreateDatabaseMetadata
+ field_entity_map:
+ database: database
+- message_name: GetDatabaseRequest
+ field_entity_map:
+ name: database
+- message_name: UpdateDatabaseDdlRequest
+ field_entity_map:
+ database: database
+- message_name: UpdateDatabaseDdlMetadata
+ field_entity_map:
+ database: database
+- message_name: DropDatabaseRequest
+ field_entity_map:
+ database: database
+- message_name: GetDatabaseDdlRequest
+ field_entity_map:
+ database: database
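
The default retry_params_def above starts retries at 1000 ms, multiplies the delay by 1.3 each time, and caps it at 32000 ms; how a generated client actually schedules and jitters retries is up to the GAPIC runtime. Purely to show what those three numbers imply, the sketch below expands them into a nominal delay sequence.

def retry_schedule(initial_ms=1000, multiplier=1.3, max_ms=32000, attempts=10):
    # Expand initial_retry_delay_millis / retry_delay_multiplier /
    # max_retry_delay_millis into the first few nominal delays.
    delays, delay = [], float(initial_ms)
    for _ in range(attempts):
        delays.append(round(delay))
        delay = min(delay * multiplier, max_ms)
    return delays

print(retry_schedule())
# [1000, 1300, 1690, 2197, 2856, 3713, 4827, 6275, 8157, 10604]
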
diff --git a/third_party/googleapis/google/spanner/admin/database/v1/spanner_database_admin.proto b/third_party/googleapis/google/spanner/admin/database/v1/spanner_database_admin.proto
new file mode 100644
index 0000000000..a530071654
--- /dev/null
+++ b/third_party/googleapis/google/spanner/admin/database/v1/spanner_database_admin.proto
@@ -0,0 +1,277 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.database.v1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database";
+option java_multiple_files = true;
+option java_outer_classname = "SpannerDatabaseAdminProto";
+option java_package = "com.google.spanner.admin.database.v1";
+
+
+// Cloud Spanner Database Admin API
+//
+// The Cloud Spanner Database Admin API can be used to create, drop, and
+// list databases. It also enables updating the schema of pre-existing
+// databases.
+service DatabaseAdmin {
+ // Lists Cloud Spanner databases.
+ rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) {
+ option (google.api.http) = { get: "/v1/{parent=projects/*/instances/*}/databases" };
+ }
+
+ // Creates a new Cloud Spanner database and starts to prepare it for serving.
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<database_name>/operations/<operation_id>` and
+ // can be used to track preparation of the database. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The
+ // [response][google.longrunning.Operation.response] field type is
+ // [Database][google.spanner.admin.database.v1.Database], if successful.
+ rpc CreateDatabase(CreateDatabaseRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/{parent=projects/*/instances/*}/databases" body: "*" };
+ }
+
+ // Gets the state of a Cloud Spanner database.
+ rpc GetDatabase(GetDatabaseRequest) returns (Database) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/instances/*/databases/*}" };
+ }
+
+ // Updates the schema of a Cloud Spanner database by
+ // creating/altering/dropping tables, columns, indexes, etc. The returned
+ // [long-running operation][google.longrunning.Operation] will have a name of
+ // the format `<database_name>/operations/<operation_id>` and can be used to
+ // track execution of the schema change(s). The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.
+ rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl" body: "*" };
+ }
+
+ // Drops (aka deletes) a Cloud Spanner database.
+ rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{database=projects/*/instances/*/databases/*}" };
+ }
+
+ // Returns the schema of a Cloud Spanner database as a list of formatted
+  // DDL statements. This method does not show pending schema updates; those may
+ // be queried using the [Operations][google.longrunning.Operations] API.
+ rpc GetDatabaseDdl(GetDatabaseDdlRequest) returns (GetDatabaseDdlResponse) {
+ option (google.api.http) = { get: "/v1/{database=projects/*/instances/*/databases/*}/ddl" };
+ }
+
+ // Sets the access control policy on a database resource. Replaces any
+ // existing policy.
+ //
+ // Authorization requires `spanner.databases.setIamPolicy` permission on
+ // [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" body: "*" };
+ }
+
+ // Gets the access control policy for a database resource. Returns an empty
+ // policy if a database exists but does not have a policy set.
+ //
+ // Authorization requires `spanner.databases.getIamPolicy` permission on
+ // [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" body: "*" };
+ }
+
+ // Returns permissions that the caller has on the specified database resource.
+ //
+ // Attempting this RPC on a non-existent Cloud Spanner database will result in
+ // a NOT_FOUND error if the user has `spanner.databases.list` permission on
+ // the containing Cloud Spanner instance. Otherwise returns an empty set of
+ // permissions.
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" body: "*" };
+ }
+}
+
+// A Cloud Spanner database.
+message Database {
+ // Indicates the current state of the database.
+ enum State {
+ // Not specified.
+ STATE_UNSPECIFIED = 0;
+
+ // The database is still being created. Operations on the database may fail
+ // with `FAILED_PRECONDITION` in this state.
+ CREATING = 1;
+
+ // The database is fully created and ready for use.
+ READY = 2;
+ }
+
+ // Required. The name of the database. Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`,
+ // where `<database>` is as specified in the `CREATE DATABASE`
+ // statement. This name can be passed to other API methods to
+ // identify the database.
+ string name = 1;
+
+ // Output only. The current database state.
+ State state = 2;
+}
+
+// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+message ListDatabasesRequest {
+ // Required. The instance whose databases should be listed.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // Number of databases to be returned in the response. If 0 or less,
+ // defaults to the server's maximum allowed page size.
+ int32 page_size = 3;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
+ // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
+ string page_token = 4;
+}
+
+// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+message ListDatabasesResponse {
+ // Databases that matched the request.
+ repeated Database databases = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
+ // of the matching databases.
+ string next_page_token = 2;
+}
+
+// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+message CreateDatabaseRequest {
+ // Required. The name of the instance that will serve the new database.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // Required. A `CREATE DATABASE` statement, which specifies the ID of the
+ // new database. The database ID must conform to the regular expression
+ // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
+ string create_statement = 2;
+
+ // An optional list of DDL statements to run inside the newly created
+ // database. Statements can create tables, indexes, etc. These
+ // statements execute atomically with the creation of the database:
+ // if there is an error in any statement, the database is not created.
+ repeated string extra_statements = 3;
+}
+
+// Metadata type for the operation returned by
+// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+message CreateDatabaseMetadata {
+ // The database being created.
+ string database = 1;
+}
+
+// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
+message GetDatabaseRequest {
+ // Required. The name of the requested database. Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`.
+ string name = 1;
+}
+
+// Enqueues the given DDL statements to be applied, in order but not
+// necessarily all at once, to the database schema at some point (or
+// points) in the future. The server checks that the statements
+// are executable (syntactically valid, name tables that exist, etc.)
+// before enqueueing them, but they may still fail upon
+// later execution (e.g., if a statement from another batch of
+// statements is applied first and it conflicts in some way, or if
+// there is some data-related problem like a `NULL` value in a column to
+// which `NOT NULL` would be added). If a statement fails, all
+// subsequent statements in the batch are automatically cancelled.
+//
+// Each batch of statements is assigned a name which can be used with
+// the [Operations][google.longrunning.Operations] API to monitor
+// progress. See the
+// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more
+// details.
+message UpdateDatabaseDdlRequest {
+ // Required. The database to update.
+ string database = 1;
+
+ // DDL statements to be applied to the database.
+ repeated string statements = 2;
+
+ // If empty, the new update request is assigned an
+ // automatically-generated operation ID. Otherwise, `operation_id`
+ // is used to construct the name of the resulting
+ // [Operation][google.longrunning.Operation].
+ //
+ // Specifying an explicit operation ID simplifies determining
+ // whether the statements were executed in the event that the
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed,
+ // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and
+ // `operation_id` fields can be combined to form the
+ // [name][google.longrunning.Operation.name] of the resulting
+ // [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`.
+ //
+ // `operation_id` should be unique within the database, and must be
+ // a valid identifier: `[a-z][a-z0-9_]*`. Note that
+ // automatically-generated operation IDs always begin with an
+ // underscore. If the named operation already exists,
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns
+ // `ALREADY_EXISTS`.
+ string operation_id = 3;
+}
+
+// Metadata type for the operation returned by
+// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+message UpdateDatabaseDdlMetadata {
+ // The database being modified.
+ string database = 1;
+
+ // For an update this list contains all the statements. For an
+ // individual statement, this list contains only that statement.
+ repeated string statements = 2;
+
+ // Reports the commit timestamps of all statements that have
+ // succeeded so far, where `commit_timestamps[i]` is the commit
+ // timestamp for the statement `statements[i]`.
+ repeated google.protobuf.Timestamp commit_timestamps = 3;
+}
+
+// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
+message DropDatabaseRequest {
+ // Required. The database to be dropped.
+ string database = 1;
+}
+
+// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+message GetDatabaseDdlRequest {
+ // Required. The database whose schema we wish to get.
+ string database = 1;
+}
+
+// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+message GetDatabaseDdlResponse {
+ // A list of formatted DDL statements defining the schema of the database
+ // specified in the request.
+ repeated string statements = 1;
+}
diff --git a/third_party/googleapis/google/spanner/admin/instance/spanner_admin_instance.yaml b/third_party/googleapis/google/spanner/admin/instance/spanner_admin_instance.yaml
new file mode 100644
index 0000000000..39091826fb
--- /dev/null
+++ b/third_party/googleapis/google/spanner/admin/instance/spanner_admin_instance.yaml
@@ -0,0 +1,41 @@
+type: google.api.Service
+config_version: 3
+name: spanner.googleapis.com
+title: Cloud Spanner Instance Admin API
+
+apis:
+ - name: google.spanner.admin.instance.v1.InstanceAdmin
+ mixins:
+ - name: google.iam.v1.IAMPolicy
+
+types:
+ - name: google.spanner.admin.instance.v1.CreateInstanceMetadata
+ - name: google.spanner.admin.instance.v1.UpdateInstanceMetadata
+
+authentication:
+ rules:
+ - selector: google.spanner.admin.instance.v1.InstanceAdmin.*,
+ google.iam.v1.IAMPolicy.*,
+ google.longrunning.Operations.*
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/spanner.admin,
+ https://www.googleapis.com/auth/cloud-platform
+
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v1/{name=projects/*/instances/*/databases/*/operations/*}'
+ additional_bindings:
+ - get: '/v1/{name=projects/*/instances/*/operations/*}'
+ - selector: google.longrunning.Operations.ListOperations
+ get: '/v1/{name=projects/*/instances/*/databases/*/operations}'
+ additional_bindings:
+ - get: '/v1/{name=projects/*/instances/*/operations}'
+ - selector: google.longrunning.Operations.CancelOperation
+ post: '/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel'
+ additional_bindings:
+ - post: '/v1/{name=projects/*/instances/*/operations/*}:cancel'
+ - selector: google.longrunning.Operations.DeleteOperation
+ delete: '/v1/{name=projects/*/instances/*/databases/*/operations/*}'
+ additional_bindings:
+ - delete: '/v1/{name=projects/*/instances/*/operations/*}'
diff --git a/third_party/googleapis/google/spanner/admin/instance/v1/spanner_admin_instance_gapic.yaml b/third_party/googleapis/google/spanner/admin/instance/v1/spanner_admin_instance_gapic.yaml
new file mode 100644
index 0000000000..3151b59cd6
--- /dev/null
+++ b/third_party/googleapis/google/spanner/admin/instance/v1/spanner_admin_instance_gapic.yaml
@@ -0,0 +1,241 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.spanner.admin.instance.spi.v1
+ python:
+ package_name: google.cloud.gapic.spanner_admin_instance.v1
+ go:
+ package_name: cloud.google.com/go/spanner/admin/instance/apiv1
+ csharp:
+ package_name: Google.Cloud.Spanner.Admin.Instance.V1
+ ruby:
+ package_name: Google::Cloud::Spanner::Admin::Instance::V1
+ php:
+ package_name: Google\Cloud\Spanner\Admin\Instance\V1
+ nodejs:
+ package_name: spanner-admin-instance.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.spanner.admin.instance.v1.InstanceAdmin
+ collections:
+ - name_pattern: projects/{project}
+ entity_name: project
+ - name_pattern: projects/{project}/instanceConfigs/{instance_config}
+ entity_name: instance_config
+ - name_pattern: projects/{project}/instances/{instance}
+ entity_name: instance
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 1000
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 32000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: ListInstanceConfigs
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: instance_configs
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ - name: GetInstanceConfig
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: instance_config
+ timeout_millis: 30000
+ - name: ListInstances
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ required_fields:
+ - parent
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ page_streaming:
+ request:
+ page_size_field: page_size
+ token_field: page_token
+ response:
+ token_field: next_page_token
+ resources_field: instances
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ - name: GetInstance
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: instance
+ timeout_millis: 30000
+ - name: CreateInstance
+ flattening:
+ groups:
+ - parameters:
+ - parent
+ - instance_id
+ - instance
+ required_fields:
+ - parent
+ - instance_id
+ - instance
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ parent: project
+ timeout_millis: 30000
+ long_running:
+ return_type: google.spanner.admin.instance.v1.Instance
+ metadata_type: google.spanner.admin.instance.v1.CreateInstanceMetadata
+ - name: UpdateInstance
+ flattening:
+ groups:
+ - parameters:
+ - instance
+ - field_mask
+ required_fields:
+ - instance
+ - field_mask
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ instance.name: instance
+ timeout_millis: 30000
+ long_running:
+ return_type: google.spanner.admin.instance.v1.Instance
+ metadata_type: google.spanner.admin.instance.v1.UpdateInstanceMetadata
+ - name: DeleteInstance
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: instance
+ timeout_millis: 30000
+ - name: SetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - policy
+ required_fields:
+ - resource
+ - policy
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: instance
+ timeout_millis: 30000
+ - name: GetIamPolicy
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ required_fields:
+ - resource
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: instance
+ timeout_millis: 30000
+ - name: TestIamPermissions
+ flattening:
+ groups:
+ - parameters:
+ - resource
+ - permissions
+ required_fields:
+ - resource
+ - permissions
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ resource: instance
+ timeout_millis: 30000
+resource_name_generation:
+- message_name: InstanceConfig
+ field_entity_map:
+ name: instance_config
+- message_name: Instance
+ field_entity_map:
+ name: instance
+ config: instance_config
+- message_name: ListInstanceConfigsRequest
+ field_entity_map:
+ parent: project
+- message_name: GetInstanceConfigRequest
+ field_entity_map:
+ name: instance_config
+- message_name: GetInstanceRequest
+ field_entity_map:
+ name: instance
+- message_name: CreateInstanceRequest
+ field_entity_map:
+ parent: project
+ instance_id: instance
+- message_name: ListInstancesRequest
+ field_entity_map:
+ parent: project
+- message_name: DeleteInstanceRequest
+ field_entity_map:
+ name: instance
diff --git a/third_party/googleapis/google/spanner/admin/instance/v1/spanner_instance_admin.proto b/third_party/googleapis/google/spanner/admin/instance/v1/spanner_instance_admin.proto
new file mode 100644
index 0000000000..d85d781e63
--- /dev/null
+++ b/third_party/googleapis/google/spanner/admin/instance/v1/spanner_instance_admin.proto
@@ -0,0 +1,446 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.instance.v1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance";
+option java_multiple_files = true;
+option java_outer_classname = "SpannerInstanceAdminProto";
+option java_package = "com.google.spanner.admin.instance.v1";
+
+
+// Cloud Spanner Instance Admin API
+//
+// The Cloud Spanner Instance Admin API can be used to create, delete,
+// modify and list instances. Instances are dedicated Cloud Spanner serving
+// and storage resources to be used by Cloud Spanner databases.
+//
+// Each instance has a "configuration", which dictates where the
+// serving resources for the Cloud Spanner instance are located (e.g.,
+// US-central, Europe). Configurations are created by Google based on
+// resource availability.
+//
+// Cloud Spanner billing is based on the instances that exist and their
+// sizes. After an instance exists, there are no additional
+// per-database or per-operation charges for use of the instance
+// (though there may be additional network bandwidth charges).
+// Instances offer isolation: problems with databases in one instance
+// will not affect other instances. However, within an instance
+// databases can affect each other. For example, if one database in an
+// instance receives a lot of requests and consumes most of the
+// instance resources, fewer resources are available for other
+// databases in that instance, and their performance may suffer.
+service InstanceAdmin {
+ // Lists the supported instance configurations for a given project.
+ rpc ListInstanceConfigs(ListInstanceConfigsRequest) returns (ListInstanceConfigsResponse) {
+ option (google.api.http) = { get: "/v1/{parent=projects/*}/instanceConfigs" };
+ }
+
+ // Gets information about a particular instance configuration.
+ rpc GetInstanceConfig(GetInstanceConfigRequest) returns (InstanceConfig) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/instanceConfigs/*}" };
+ }
+
+ // Lists all instances in the given project.
+ rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
+ option (google.api.http) = { get: "/v1/{parent=projects/*}/instances" };
+ }
+
+ // Gets information about a particular instance.
+ rpc GetInstance(GetInstanceRequest) returns (Instance) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/instances/*}" };
+ }
+
+  // Creates an instance and begins preparing it for serving. The
+ // returned [long-running operation][google.longrunning.Operation]
+ // can be used to track the progress of preparing the new
+ // instance. The instance name is assigned by the caller. If the
+ // named instance already exists, `CreateInstance` returns
+ // `ALREADY_EXISTS`.
+ //
+ // Immediately upon completion of this request:
+ //
+ // * The instance is readable via the API, with all requested attributes
+ // but no allocated resources. Its state is `CREATING`.
+ //
+ // Until completion of the returned operation:
+ //
+ // * Cancelling the operation renders the instance immediately unreadable
+ // via the API.
+ // * The instance can be deleted.
+ // * All other attempts to modify the instance are rejected.
+ //
+ // Upon completion of the returned operation:
+ //
+  //   * Billing for all successfully-allocated resources begins (some types
+  //     may be allocated at lower than the requested levels).
+ // * Databases can be created in the instance.
+ // * The instance's allocated resource levels are readable via the API.
+ // * The instance's state becomes `READY`.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<instance_name>/operations/<operation_id>` and
+ // can be used to track creation of the instance. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
+ rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v1/{parent=projects/*}/instances" body: "*" };
+ }
+
+ // Updates an instance, and begins allocating or releasing resources
+ // as requested. The returned [long-running
+ // operation][google.longrunning.Operation] can be used to track the
+ // progress of updating the instance. If the named instance does not
+ // exist, returns `NOT_FOUND`.
+ //
+ // Immediately upon completion of this request:
+ //
+ // * For resource types for which a decrease in the instance's allocation
+ // has been requested, billing is based on the newly-requested level.
+ //
+ // Until completion of the returned operation:
+ //
+ // * Cancelling the operation sets its metadata's
+ // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins
+ // restoring resources to their pre-request values. The operation
+ // is guaranteed to succeed at undoing all resource changes,
+ // after which point it terminates with a `CANCELLED` status.
+ // * All other attempts to modify the instance are rejected.
+ // * Reading the instance via the API continues to give the pre-request
+ // resource levels.
+ //
+ // Upon completion of the returned operation:
+ //
+  //   * Billing begins for all successfully-allocated resources (some types
+  //     may be allocated at lower than the requested levels).
+ // * All newly-reserved resources are available for serving the instance's
+ // tables.
+ // * The instance's new resource levels are readable via the API.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<instance_name>/operations/<operation_id>` and
+ // can be used to track the instance modification. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
+ //
+ // Authorization requires `spanner.instances.update` permission on
+ // resource [name][google.spanner.admin.instance.v1.Instance.name].
+ rpc UpdateInstance(UpdateInstanceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { patch: "/v1/{instance.name=projects/*/instances/*}" body: "*" };
+ }
+
+ // Deletes an instance.
+ //
+ // Immediately upon completion of the request:
+ //
+ // * Billing ceases for all of the instance's reserved resources.
+ //
+ // Soon afterward:
+ //
+ // * The instance and *all of its databases* immediately and
+ // irrevocably disappear from the API. All data in the databases
+ // is permanently deleted.
+ rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/instances/*}" };
+ }
+
+ // Sets the access control policy on an instance resource. Replaces any
+ // existing policy.
+ //
+ // Authorization requires `spanner.instances.setIamPolicy` on
+ // [resource][google.iam.v1.SetIamPolicyRequest.resource].
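+ //
+ // A sketch of a request, assuming a hypothetical instance, role, and user
+ // (shown for illustration only):
+ //
+ //     "resource": "projects/my-project/instances/test-instance",
+ //     "policy": {
+ //       "bindings": [{
+ //         "role": "roles/spanner.databaseUser",
+ //         "members": ["user:alice@example.com"]
+ //       }]
+ //     }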
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*}:setIamPolicy" body: "*" };
+ }
+
+ // Gets the access control policy for an instance resource. Returns an empty
+ // policy if an instance exists but does not have a policy set.
+ //
+ // Authorization requires `spanner.instances.getIamPolicy` on
+ // [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*}:getIamPolicy" body: "*" };
+ }
+
+ // Returns permissions that the caller has on the specified instance resource.
+ //
+ // Attempting this RPC on a non-existent Cloud Spanner instance resource will
+ // result in a NOT_FOUND error if the user has `spanner.instances.list`
+ // permission on the containing Google Cloud Project. Otherwise returns an
+ // empty set of permissions.
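+ //
+ // For example, to check a single permission on an instance (the resource
+ // name and permission below are illustrative):
+ //
+ //     "resource": "projects/my-project/instances/test-instance",
+ //     "permissions": ["spanner.instances.get"]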
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) {
+ option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*}:testIamPermissions" body: "*" };
+ }
+}
+
+// A possible configuration for a Cloud Spanner instance. Configurations
+// define the geographic placement of nodes and their replication.
+message InstanceConfig {
+ // A unique identifier for the instance configuration. Values
+ // are of the form
+ // `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`
+ string name = 1;
+
+ // The name of this instance configuration as it appears in UIs.
+ string display_name = 2;
+}
+
+// An isolated set of Cloud Spanner resources on which databases can be hosted.
+message Instance {
+ // Indicates the current state of the instance.
+ enum State {
+ // Not specified.
+ STATE_UNSPECIFIED = 0;
+
+ // The instance is still being created. Resources may not be
+ // available yet, and operations such as database creation may not
+ // work.
+ CREATING = 1;
+
+ // The instance is fully created and ready to do work such as
+ // creating databases.
+ READY = 2;
+ }
+
+ // Required. A unique identifier for the instance, which cannot be changed
+ // after the instance is created. Values are of the form
+ // `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
+ // segment of the name must be between 6 and 30 characters in length.
+ string name = 1;
+
+ // Required. The name of the instance's configuration. Values are of the form
+ // `projects/<project>/instanceConfigs/<configuration>`. See
+ // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ string config = 2;
+
+ // Required. The descriptive name for this instance as it appears in UIs.
+ // Must be unique per project and between 4 and 30 characters in length.
+ string display_name = 3;
+
+ // Required. The number of nodes allocated to this instance.
+ int32 node_count = 5;
+
+ // Output only. The current instance state. For
+ // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], the state must be
+ // either omitted or set to `CREATING`. For
+ // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be
+ // either omitted or set to `READY`.
+ State state = 6;
+
+ // Cloud Labels are a flexible and lightweight mechanism for organizing cloud
+ // resources into groups that reflect a customer's organizational needs and
+ // deployment strategies. Cloud Labels can be used to filter collections of
+ // resources, to control how resource metrics are aggregated, and as
+ // arguments to policy management rules (e.g. route, firewall, load
+ // balancing, etc.).
+ //
+ // * Label keys must be between 1 and 63 characters long and must conform to
+ // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
+ // * Label values must be between 0 and 63 characters long and must conform
+ // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
+ // * No more than 64 labels can be associated with a given resource.
+ //
+ // See https://goo.gl/xmQnxf for more information on and examples of labels.
+ //
+ // If you plan to use labels in your own code, note that additional
+ // characters may be allowed in the future, so you are advised to use an
+ // internal label representation, such as JSON, that does not rely on
+ // specific characters being disallowed. For example, representing labels as
+ // the string `name + "_" + value` would prove problematic if "_" were
+ // allowed in a future release.
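+ //
+ // For example, a labels map might look like the following; the keys and
+ // values here are purely illustrative:
+ //
+ //     "labels": {
+ //       "env": "dev",
+ //       "cost-center": "cc-123"
+ //     }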
+ map<string, string> labels = 7;
+}
+
+// The request for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+message ListInstanceConfigsRequest {
+ // Required. The name of the project for which a list of supported instance
+ // configurations is requested. Values are of the form
+ // `projects/<project>`.
+ string parent = 1;
+
+ // Number of instance configurations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ int32 page_size = 2;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
+ // from a previous [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
+ string page_token = 3;
+}
+
+// The response for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+message ListInstanceConfigsResponse {
+ // The list of requested instance configurations.
+ repeated InstanceConfig instance_configs = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] call to
+ // fetch more of the matching instance configurations.
+ string next_page_token = 2;
+}
+
+// The request for
+// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
+message GetInstanceConfigRequest {
+ // Required. The name of the requested instance configuration. Values are of
+ // the form `projects/<project>/instanceConfigs/<config>`.
+ string name = 1;
+}
+
+// The request for [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
+message GetInstanceRequest {
+ // Required. The name of the requested instance. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string name = 1;
+}
+
+// The request for [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+message CreateInstanceRequest {
+ // Required. The name of the project in which to create the instance. Values
+ // are of the form `projects/<project>`.
+ string parent = 1;
+
+ // Required. The ID of the instance to create. Valid identifiers are of the
+ // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 6 and 30 characters in
+ // length.
+ string instance_id = 2;
+
+ // Required. The instance to create. The name may be omitted, but if
+ // specified must be `<parent>/instances/<instance_id>`.
+ Instance instance = 3;
+}
+
+// The request for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+message ListInstancesRequest {
+ // Required. The name of the project for which a list of instances is
+ // requested. Values are of the form `projects/<project>`.
+ string parent = 1;
+
+ // Number of instances to be returned in the response. If 0 or less, defaults
+ // to the server's maximum allowed page size.
+ int32 page_size = 2;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] from a
+ // previous [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
+ string page_token = 3;
+
+ // An expression for filtering the results of the request. Filter rules are
+ // case insensitive. The fields eligible for filtering are:
+ //
+ // * name
+ // * display_name
+ // * labels.key where key is the name of a label
+ //
+ // Some examples of using filters are:
+ //
+ // * name:* --> The instance has a name.
+ // * name:Howl --> The instance's name contains the string "howl".
+ // * name:HOWL --> Equivalent to above.
+ // * NAME:howl --> Equivalent to above.
+ // * labels.env:* --> The instance has the label "env".
+ // * labels.env:dev --> The instance has the label "env" and the value of
+ // the label contains the string "dev".
+ // * name:howl labels.env:dev --> The instance's name contains "howl" and
+ // it has the label "env" with its value
+ // containing "dev".
+ string filter = 4;
+}
+
+// The response for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+message ListInstancesResponse {
+ // The list of requested instances.
+ repeated Instance instances = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances] call to fetch more
+ // of the matching instances.
+ string next_page_token = 2;
+}
+
+// The request for [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+message UpdateInstanceRequest {
+ // Required. The instance to update, which must always include the instance
+ // name. Otherwise, only fields mentioned in [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included.
+ Instance instance = 1;
+
+ // Required. A mask specifying which fields in [instance][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated.
+ // The field mask must always be specified; this prevents any future fields in
+ // [Instance][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know
+ // about them.
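+ //
+ // For example, to change only an instance's node count, a request might
+ // carry the following (names and values are illustrative):
+ //
+ //     "instance": {
+ //       "name": "projects/my-project/instances/test-instance",
+ //       "node_count": 3
+ //     },
+ //     "field_mask": { "paths": ["node_count"] }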
+ google.protobuf.FieldMask field_mask = 2;
+}
+
+// The request for [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
+message DeleteInstanceRequest {
+ // Required. The name of the instance to be deleted. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string name = 1;
+}
+
+// Metadata type for the operation returned by
+// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+message CreateInstanceMetadata {
+ // The instance being created.
+ Instance instance = 1;
+
+ // The time at which the
+ // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] request was
+ // received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ google.protobuf.Timestamp cancel_time = 3;
+
+ // The time at which this operation failed or was completed successfully.
+ google.protobuf.Timestamp end_time = 4;
+}
+
+// Metadata type for the operation returned by
+// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+message UpdateInstanceMetadata {
+ // The desired end state of the update.
+ Instance instance = 1;
+
+ // The time at which the
+ // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
+ // request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ google.protobuf.Timestamp cancel_time = 3;
+
+ // The time at which this operation failed or was completed successfully.
+ google.protobuf.Timestamp end_time = 4;
+}
diff --git a/third_party/googleapis/google/spanner/spanner.yaml b/third_party/googleapis/google/spanner/spanner.yaml
new file mode 100644
index 0000000000..99fe5149e3
--- /dev/null
+++ b/third_party/googleapis/google/spanner/spanner.yaml
@@ -0,0 +1,56 @@
+# This service config is currently set for generating client libraries for the
+# non-admin API. Use the spanner_admin_*.yaml service configs to generate admin
+# client libraries.
+
+type: google.api.Service
+config_version: 3
+name: spanner.googleapis.com
+title: Cloud Spanner API
+
+apis:
+ - name: google.spanner.v1.Spanner
+
+authentication:
+ rules:
+ - selector: google.spanner.v1.Spanner.*
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/spanner.data,
+ https://www.googleapis.com/auth/cloud-platform
+
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v1/{name=projects/*/instances/*/databases/*/operations/*}'
+ additional_bindings:
+ - get: '/v1/{name=projects/*/instances/*/operations/*}'
+ - selector: google.longrunning.Operations.ListOperations
+ get: '/v1/{name=projects/*/instances/*/databases/*/operations}'
+ additional_bindings:
+ - get: '/v1/{name=projects/*/instances/*/operations}'
+ - selector: google.longrunning.Operations.CancelOperation
+ post: '/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel'
+ additional_bindings:
+ - post: '/v1/{name=projects/*/instances/*/operations/*}:cancel'
+ - selector: google.longrunning.Operations.DeleteOperation
+ delete: '/v1/{name=projects/*/instances/*/databases/*/operations/*}'
+ additional_bindings:
+ - delete: '/v1/{name=projects/*/instances/*/operations/*}'
+
+documentation:
+ summary:
+ Cloud Spanner is a managed, mission-critical, globally consistent and scalable relational database service.
+
+ rules:
+ - selector: google.iam.v1.SetIamPolicyRequest.resource
+ description: |
+ REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/<project ID>/instances/<instance ID>` for instance resources and `projects/<project ID>/instances/<instance ID>/databases/<database ID>` for database resources.
+ - selector: google.iam.v1.GetIamPolicyRequest.resource
+ description: |
+ REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/<project ID>/instances/<instance ID>` for instance resources and `projects/<project ID>/instances/<instance ID>/databases/<database ID>` for database resources.
+ - selector: google.iam.v1.TestIamPermissionsRequest.resource
+ description: |
+ REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/<project ID>/instances/<instance ID>` for instance resources and `projects/<project ID>/instances/<instance ID>/databases/<database ID>` for database resources.
+ - selector: google.iam.v1.TestIamPermissionsRequest.permissions
+ description: |
+ REQUIRED: The set of permissions to check for 'resource'.
+ Permissions with wildcards (such as '*', 'spanner.*', 'spanner.instances.*') are not allowed.
diff --git a/third_party/googleapis/google/spanner/v1/keys.proto b/third_party/googleapis/google/spanner/v1/keys.proto
new file mode 100644
index 0000000000..2bfae6314d
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/keys.proto
@@ -0,0 +1,162 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/struct.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
+option java_multiple_files = true;
+option java_outer_classname = "KeysProto";
+option java_package = "com.google.spanner.v1";
+
+
+// KeyRange represents a range of rows in a table or index.
+//
+// A range has a start key and an end key. These keys can be open or
+// closed, indicating if the range includes rows with that key.
+//
+// Keys are represented by lists, where the ith value in the list
+// corresponds to the ith component of the table or index primary key.
+// Individual values are encoded as described [here][google.spanner.v1.TypeCode].
+//
+// For example, consider the following table definition:
+//
+// CREATE TABLE UserEvents (
+// UserName STRING(MAX),
+// EventDate STRING(10)
+// ) PRIMARY KEY(UserName, EventDate);
+//
+// The following keys name rows in this table:
+//
+// ["Bob", "2014-09-23"]
+// ["Alfred", "2015-06-12"]
+//
+// Since the `UserEvents` table's `PRIMARY KEY` clause names two
+// columns, each `UserEvents` key has two elements; the first is the
+// `UserName`, and the second is the `EventDate`.
+//
+// Key ranges with multiple components are interpreted
+// lexicographically by component using the table or index key's declared
+// sort order. For example, the following range returns all events for
+// user `"Bob"` that occurred in the year 2015:
+//
+// "start_closed": ["Bob", "2015-01-01"]
+// "end_closed": ["Bob", "2015-12-31"]
+//
+// Start and end keys can omit trailing key components. This affects the
+// inclusion and exclusion of rows that exactly match the provided key
+// components: if the key is closed, then rows that exactly match the
+// provided components are included; if the key is open, then rows
+// that exactly match are not included.
+//
+// For example, the following range includes all events for `"Bob"` that
+// occurred during and after the year 2000:
+//
+// "start_closed": ["Bob", "2000-01-01"]
+// "end_closed": ["Bob"]
+//
+// The next example retrieves all events for `"Bob"`:
+//
+// "start_closed": ["Bob"]
+// "end_closed": ["Bob"]
+//
+// To retrieve events before the year 2000:
+//
+// "start_closed": ["Bob"]
+// "end_open": ["Bob", "2000-01-01"]
+//
+// The following range includes all rows in the table:
+//
+// "start_closed": []
+// "end_closed": []
+//
+// This range returns all users whose `UserName` begins with any
+// character from A to C:
+//
+// "start_closed": ["A"]
+// "end_open": ["D"]
+//
+// This range returns all users whose `UserName` begins with B:
+//
+// "start_closed": ["B"]
+// "end_open": ["C"]
+//
+// Key ranges honor column sort order. For example, suppose a table is
+// defined as follows:
+//
+//     CREATE TABLE DescendingSortedTable (
+// Key INT64,
+// ...
+// ) PRIMARY KEY(Key DESC);
+//
+// The following range retrieves all rows with key values between 1
+// and 100 inclusive:
+//
+// "start_closed": ["100"]
+// "end_closed": ["1"]
+//
+// Note that 100 is passed as the start, and 1 is passed as the end,
+// because `Key` is a descending column in the schema.
+message KeyRange {
+ // The start key must be provided. It can be either closed or open.
+ oneof start_key_type {
+ // If the start is closed, then the range includes all rows whose
+ // first `len(start_closed)` key columns exactly match `start_closed`.
+ google.protobuf.ListValue start_closed = 1;
+
+ // If the start is open, then the range excludes rows whose first
+ // `len(start_open)` key columns exactly match `start_open`.
+ google.protobuf.ListValue start_open = 2;
+ }
+
+ // The end key must be provided. It can be either closed or open.
+ oneof end_key_type {
+ // If the end is closed, then the range includes all rows whose
+ // first `len(end_closed)` key columns exactly match `end_closed`.
+ google.protobuf.ListValue end_closed = 3;
+
+ // If the end is open, then the range excludes rows whose first
+ // `len(end_open)` key columns exactly match `end_open`.
+ google.protobuf.ListValue end_open = 4;
+ }
+}
+
+// `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All
+// the keys are expected to be in the same table or index. The keys need
+// not be sorted in any particular way.
+//
+// If the same key is specified multiple times in the set (for example
+// if two ranges, two keys, or a key and a range overlap), Cloud Spanner
+// behaves as if the key were only specified once.
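+//
+// For example, a `KeySet` naming one specific `UserEvents` key (see the
+// example table above) plus a single-user range might look like:
+//
+//     "keys": [["Bob", "2014-09-23"]],
+//     "ranges": [{
+//       "start_closed": ["Alfred"],
+//       "end_closed": ["Alfred"]
+//     }]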
+message KeySet {
+ // A list of specific keys. Entries in `keys` should have exactly as
+ // many elements as there are columns in the primary or index key
+ // with which this `KeySet` is used. Individual key values are
+ // encoded as described [here][google.spanner.v1.TypeCode].
+ repeated google.protobuf.ListValue keys = 1;
+
+ // A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more information about
+ // key range specifications.
+ repeated KeyRange ranges = 2;
+
+ // For convenience `all` can be set to `true` to indicate that this
+ // `KeySet` matches all keys in the table or index. Note that any keys
+ // specified in `keys` or `ranges` are only yielded once.
+ bool all = 3;
+}
diff --git a/third_party/googleapis/google/spanner/v1/mutation.proto b/third_party/googleapis/google/spanner/v1/mutation.proto
new file mode 100644
index 0000000000..737af54ad1
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/mutation.proto
@@ -0,0 +1,92 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/struct.proto";
+import "google/spanner/v1/keys.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
+option java_multiple_files = true;
+option java_outer_classname = "MutationProto";
+option java_package = "com.google.spanner.v1";
+
+
+// A modification to one or more Cloud Spanner rows. Mutations can be
+// applied to a Cloud Spanner database by sending them in a
+// [Commit][google.spanner.v1.Spanner.Commit] call.
+message Mutation {
+ // Arguments to [insert][google.spanner.v1.Mutation.insert], [update][google.spanner.v1.Mutation.update], [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and
+ // [replace][google.spanner.v1.Mutation.replace] operations.
+ message Write {
+ // Required. The table whose rows will be written.
+ string table = 1;
+
+ // The names of the columns in [table][google.spanner.v1.Mutation.Write.table] to be written.
+ //
+ // The list of columns must contain enough columns to allow
+ // Cloud Spanner to derive values for all primary key columns in the
+ // row(s) to be modified.
+ repeated string columns = 2;
+
+ // The values to be written. `values` can contain more than one
+ // list of values. If it does, then multiple rows are written, one
+ // for each entry in `values`. Each list in `values` must have
+ // exactly as many entries as there are entries in [columns][google.spanner.v1.Mutation.Write.columns]
+ // above. Sending multiple lists is equivalent to sending multiple
+ // `Mutation`s, each containing one `values` entry and repeating
+ // [table][google.spanner.v1.Mutation.Write.table] and [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in each list are
+ // encoded as described [here][google.spanner.v1.TypeCode].
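+ //
+ // For example, writing two rows to a hypothetical `Users` table might
+ // look like (table, columns, and values are illustrative):
+ //
+ //     "table": "Users",
+ //     "columns": ["UserId", "UserName"],
+ //     "values": [["1", "Alice"], ["2", "Bob"]]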
+ repeated google.protobuf.ListValue values = 3;
+ }
+
+ // Arguments to [delete][google.spanner.v1.Mutation.delete] operations.
+ message Delete {
+ // Required. The table whose rows will be deleted.
+ string table = 1;
+
+ // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete.
+ KeySet key_set = 2;
+ }
+
+ // Required. The operation to perform.
+ oneof operation {
+ // Insert new rows in a table. If any of the rows already exist,
+ // the write or transaction fails with error `ALREADY_EXISTS`.
+ Write insert = 1;
+
+ // Update existing rows in a table. If any of the rows does not
+ // already exist, the transaction fails with error `NOT_FOUND`.
+ Write update = 2;
+
+ // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, then
+ // its column values are overwritten with the ones provided. Any
+ // column values not explicitly written are preserved.
+ Write insert_or_update = 3;
+
+ // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, it is
+ // deleted, and the column values provided are inserted
+ // instead. Unlike [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this means any values not
+ // explicitly written become `NULL`.
+ Write replace = 4;
+
+ // Delete rows from a table. Succeeds whether or not the named
+ // rows were present.
+ Delete delete = 5;
+ }
+}
diff --git a/third_party/googleapis/google/spanner/v1/query_plan.proto b/third_party/googleapis/google/spanner/v1/query_plan.proto
new file mode 100644
index 0000000000..b855a8ec1b
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/query_plan.proto
@@ -0,0 +1,128 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/struct.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
+option java_multiple_files = true;
+option java_outer_classname = "QueryPlanProto";
+option java_package = "com.google.spanner.v1";
+
+
+// Node information for nodes appearing in a [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes].
+message PlanNode {
+ // Metadata associated with a parent-child relationship appearing in a
+ // [PlanNode][google.spanner.v1.PlanNode].
+ message ChildLink {
+ // The node to which the link points.
+ int32 child_index = 1;
+
+ // The type of the link. For example, in Hash Joins this could be used to
+ // distinguish between the build child and the probe child, or in the case
+ // of the child being an output variable, to represent the tag associated
+ // with the output variable.
+ string type = 2;
+
+ // Only present if the child node is [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds
+ // to an output variable of the parent node. The field carries the name of
+ // the output variable.
+ // For example, a `TableScan` operator that reads rows from a table will
+ // have child links to the `SCALAR` nodes representing the output variables
+ // created for each column that is read by the operator. The corresponding
+ // `variable` fields will be set to the variable names assigned to the
+ // columns.
+ string variable = 3;
+ }
+
+ // Condensed representation of a node and its subtree. Only present for
+ // `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode].
+ message ShortRepresentation {
+ // A string representation of the expression subtree rooted at this node.
+ string description = 1;
+
+ // A mapping of (subquery variable name) -> (subquery node id) for cases
+ // where the `description` string of this node references a `SCALAR`
+ // subquery contained in the expression subtree rooted at this node. The
+ // referenced `SCALAR` subquery may not necessarily be a direct child of
+ // this node.
+ map<string, int32> subqueries = 2;
+ }
+
+ // The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between the two different kinds of
+ // nodes that can appear in a query plan.
+ enum Kind {
+ // Not specified.
+ KIND_UNSPECIFIED = 0;
+
+ // Denotes a Relational operator node in the expression tree. Relational
+ // operators represent iterative processing of rows during query execution.
+ // For example, a `TableScan` operation that reads rows from a table.
+ RELATIONAL = 1;
+
+ // Denotes a Scalar node in the expression tree. Scalar nodes represent
+ // non-iterable entities in the query plan. For example, constants or
+ // arithmetic operators appearing inside predicate expressions or references
+ // to column names.
+ SCALAR = 2;
+ }
+
+ // The `PlanNode`'s index in [node list][google.spanner.v1.QueryPlan.plan_nodes].
+ int32 index = 1;
+
+ // Used to determine the type of node. May be needed for visualizing
+ // different kinds of nodes differently. For example, if the node is a
+ // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a condensed representation
+ // which can be used to directly embed a description of the node in its
+ // parent.
+ Kind kind = 2;
+
+ // The display name for the node.
+ string display_name = 3;
+
+ // List of child node `index`es and their relationship to this parent.
+ repeated ChildLink child_links = 4;
+
+ // Condensed representation for [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
+ ShortRepresentation short_representation = 5;
+
+ // Attributes relevant to the node contained in a group of key-value pairs.
+ // For example, a Parameter Reference node could have the following
+ // information in its metadata:
+ //
+ // {
+ // "parameter_reference": "param1",
+ // "parameter_type": "array"
+ // }
+ google.protobuf.Struct metadata = 6;
+
+ // The execution statistics associated with the node, contained in a group of
+ // key-value pairs. Only present if the plan was returned as a result of a
+ // profile query. For example, number of executions, number of rows/time per
+ // execution, etc.
+ google.protobuf.Struct execution_stats = 7;
+}
+
+// Contains an ordered list of nodes appearing in the query plan.
+message QueryPlan {
+ // The nodes in the query plan. Plan nodes are returned in pre-order starting
+ // with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `index` field corresponds to its
+ // position in `plan_nodes`.
+ repeated PlanNode plan_nodes = 1;
+}
diff --git a/third_party/googleapis/google/spanner/v1/result_set.proto b/third_party/googleapis/google/spanner/v1/result_set.proto
new file mode 100644
index 0000000000..6f5e5e9fc6
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/result_set.proto
@@ -0,0 +1,186 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/struct.proto";
+import "google/spanner/v1/query_plan.proto";
+import "google/spanner/v1/transaction.proto";
+import "google/spanner/v1/type.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
+option java_multiple_files = true;
+option java_outer_classname = "ResultSetProto";
+option java_package = "com.google.spanner.v1";
+
+
+// Results from [Read][google.spanner.v1.Spanner.Read] or
+// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
+message ResultSet {
+ // Metadata about the result set, such as row type information.
+ ResultSetMetadata metadata = 1;
+
+ // Each element in `rows` is a row whose format is defined by
+ // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith element
+ // in each row matches the ith field in
+ // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements are
+ // encoded based on type as described
+ // [here][google.spanner.v1.TypeCode].
+ repeated google.protobuf.ListValue rows = 2;
+
+ // Query plan and execution statistics for the query that produced this
+ // result set. These can be requested by setting
+ // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
+ ResultSetStats stats = 3;
+}
+
+// Partial results from a streaming read or SQL query. Streaming reads and
+// SQL queries better tolerate large result sets, large rows, and large
+// values, but are a little trickier to consume.
+message PartialResultSet {
+ // Metadata about the result set, such as row type information.
+ // Only present in the first response.
+ ResultSetMetadata metadata = 1;
+
+ // A streamed result set consists of a stream of values, which might
+ // be split into many `PartialResultSet` messages to accommodate
+ // large rows and/or large values. Every N complete values defines a
+ // row, where N is equal to the number of entries in
+ // [metadata.row_type.fields][google.spanner.v1.StructType.fields].
+ //
+ // Most values are encoded based on type as described
+ // [here][google.spanner.v1.TypeCode].
+ //
+ // It is possible that the last value in `values` is "chunked",
+ // meaning that the rest of the value is sent in subsequent
+ // `PartialResultSet`(s). This is denoted by the [chunked_value][google.spanner.v1.PartialResultSet.chunked_value]
+ // field. Two or more chunked values can be merged to form a
+ // complete value as follows:
+ //
+ // * `bool/number/null`: cannot be chunked
+ // * `string`: concatenate the strings
+ // * `list`: concatenate the lists. If the last element in a list is a
+ // `string`, `list`, or `object`, merge it with the first element in
+ // the next list by applying these rules recursively.
+ // * `object`: concatenate the (field name, field value) pairs. If a
+ // field name is duplicated, then apply these rules recursively
+ // to merge the field values.
+ //
+ // Some examples of merging:
+ //
+ // # Strings are concatenated.
+ // "foo", "bar" => "foobar"
+ //
+ // # Lists of non-strings are concatenated.
+ // [2, 3], [4] => [2, 3, 4]
+ //
+ // # Lists are concatenated, but the last and first elements are merged
+ // # because they are strings.
+ // ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
+ //
+ // # Lists are concatenated, but the last and first elements are merged
+ // # because they are lists. Recursively, the last and first elements
+ // # of the inner lists are merged because they are strings.
+ // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
+ //
+ // # Non-overlapping object fields are combined.
+ //     {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
+ //
+ // # Overlapping object fields are merged.
+ // {"a": "1"}, {"a": "2"} => {"a": "12"}
+ //
+ // # Examples of merging objects containing lists of strings.
+ // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
+ //
+ // For a more complete example, suppose a streaming SQL query is
+ // yielding a result set whose rows contain a single string
+ // field. The following `PartialResultSet`s might be yielded:
+ //
+ // {
+ // "metadata": { ... }
+ // "values": ["Hello", "W"]
+ // "chunked_value": true
+ // "resume_token": "Af65..."
+ // }
+ // {
+ // "values": ["orl"]
+ // "chunked_value": true
+ // "resume_token": "Bqp2..."
+ // }
+ // {
+ // "values": ["d"]
+ // "resume_token": "Zx1B..."
+ // }
+ //
+ // This sequence of `PartialResultSet`s encodes two rows, one
+ // containing the field value `"Hello"`, and a second containing the
+ // field value `"World" = "W" + "orl" + "d"`.
+ repeated google.protobuf.Value values = 2;
+
+ // If true, then the final value in [values][google.spanner.v1.PartialResultSet.values] is chunked, and must
+ // be combined with more values from subsequent `PartialResultSet`s
+ // to obtain a complete field value.
+ bool chunked_value = 3;
+
+ // Streaming calls might be interrupted for a variety of reasons, such
+ // as TCP connection loss. If this occurs, the stream of results can
+ // be resumed by re-sending the original request and including
+ // `resume_token`. Note that executing any other transaction in the
+ // same session invalidates the token.
+ bytes resume_token = 4;
+
+ // Query plan and execution statistics for the query that produced this
+ // streaming result set. These can be requested by setting
+ // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] and are sent
+ // only once with the last response in the stream.
+ ResultSetStats stats = 5;
+}
+
+// Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
+message ResultSetMetadata {
+ // Indicates the field names and types for the rows in the result
+ // set. For example, a SQL query like `"SELECT UserId, UserName FROM
+ // Users"` could return a `row_type` value like:
+ //
+ // "fields": [
+ // { "name": "UserId", "type": { "code": "INT64" } },
+ //       { "name": "UserName", "type": { "code": "STRING" } }
+ // ]
+ StructType row_type = 1;
+
+ // If the read or SQL query began a transaction as a side-effect, the
+ // information about the new transaction is yielded here.
+ Transaction transaction = 2;
+}
+
+// Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
+message ResultSetStats {
+ // [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this result.
+ QueryPlan query_plan = 1;
+
+ // Aggregated statistics from the execution of the query. Only present when
+ // the query is profiled. For example, a query could return the statistics as
+ // follows:
+ //
+ // {
+ // "rows_returned": "3",
+ // "elapsed_time": "1.22 secs",
+ // "cpu_time": "1.19 secs"
+ // }
+ google.protobuf.Struct query_stats = 2;
+}
diff --git a/third_party/googleapis/google/spanner/v1/spanner.proto b/third_party/googleapis/google/spanner/v1/spanner.proto
new file mode 100644
index 0000000000..80992f00be
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/spanner.proto
@@ -0,0 +1,348 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.v1;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/spanner/v1/keys.proto";
+import "google/spanner/v1/mutation.proto";
+import "google/spanner/v1/result_set.proto";
+import "google/spanner/v1/transaction.proto";
+import "google/spanner/v1/type.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
+option java_multiple_files = true;
+option java_outer_classname = "SpannerProto";
+option java_package = "com.google.spanner.v1";
+
+
+// Cloud Spanner API
+//
+// The Cloud Spanner API can be used to manage sessions and execute
+// transactions on data stored in Cloud Spanner databases.
+service Spanner {
+ // Creates a new session. A session can be used to perform
+ // transactions that read and/or modify data in a Cloud Spanner database.
+ // Sessions are meant to be reused for many consecutive
+ // transactions.
+ //
+ // Sessions can only execute one transaction at a time. To execute
+ // multiple concurrent read-write/write-only transactions, create
+ // multiple sessions. Note that standalone reads and queries use a
+ // transaction internally, and count toward the one transaction
+ // limit.
+ //
+ // Cloud Spanner limits the number of sessions that can exist at any given
+ // time; thus, it is a good idea to delete idle and/or unneeded sessions.
+ // Aside from explicit deletes, Cloud Spanner can delete sessions for which no
+ // operations are sent for more than an hour. If a session is deleted,
+ // requests to it return `NOT_FOUND`.
+ //
+ // Idle sessions can be kept alive by sending a trivial SQL query
+ // periodically, e.g., `"SELECT 1"`.
+ rpc CreateSession(CreateSessionRequest) returns (Session) {
+ option (google.api.http) = { post: "/v1/{database=projects/*/instances/*/databases/*}/sessions" body: "" };
+ }
+
+ // Gets a session. Returns `NOT_FOUND` if the session does not exist.
+ // This is mainly useful for determining whether a session is still
+ // alive.
+ rpc GetSession(GetSessionRequest) returns (Session) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}" };
+ }
+
+ // Ends a session, releasing server resources associated with it.
+ rpc DeleteSession(DeleteSessionRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}" };
+ }
+
+ // Executes an SQL query, returning all rows in a single reply. This
+ // method cannot be used to return a result set larger than 10 MiB;
+ // if the query yields more data than that, the query fails with
+ // a `FAILED_PRECONDITION` error.
+ //
+ // Queries inside read-write transactions might return `ABORTED`. If
+ // this occurs, the application should restart the transaction from
+ // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
+ //
+ // Larger result sets can be fetched in streaming fashion by calling
+ // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead.
+ rpc ExecuteSql(ExecuteSqlRequest) returns (ResultSet) {
+ option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql" body: "*" };
+ }
+
+ // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result
+ // set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there
+ // is no limit on the size of the returned result set. However, no
+ // individual row in the result set can exceed 100 MiB, and no
+ // column value can exceed 10 MiB.
+ rpc ExecuteStreamingSql(ExecuteSqlRequest) returns (stream PartialResultSet) {
+ option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql" body: "*" };
+ }
+
+ // Reads rows from the database using key lookups and scans, as a
+ // simple key/value style alternative to
+ // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to
+ // return a result set larger than 10 MiB; if the read matches more
+ // data than that, the read fails with a `FAILED_PRECONDITION`
+ // error.
+ //
+ // Reads inside read-write transactions might return `ABORTED`. If
+ // this occurs, the application should restart the transaction from
+ // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
+ //
+ // Larger result sets can be yielded in streaming fashion by calling
+ // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
+ rpc Read(ReadRequest) returns (ResultSet) {
+ option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read" body: "*" };
+ }
+
+ // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a
+ // stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the
+ // size of the returned result set. However, no individual row in
+ // the result set can exceed 100 MiB, and no column value can exceed
+ // 10 MiB.
+ rpc StreamingRead(ReadRequest) returns (stream PartialResultSet) {
+ option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead" body: "*" };
+ }
+
+ // Begins a new transaction. This step can often be skipped:
+ // [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
+ // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
+ // side-effect.
+ rpc BeginTransaction(BeginTransactionRequest) returns (Transaction) {
+ option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction" body: "*" };
+ }
+
+ // Commits a transaction. The request includes the mutations to be
+ // applied to rows in the database.
+ //
+ // `Commit` might return an `ABORTED` error. This can occur at any time;
+ // commonly, the cause is conflicts with concurrent
+ // transactions. However, it can also happen for a variety of other
+ // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
+ // the transaction from the beginning, re-using the same session.
+ rpc Commit(CommitRequest) returns (CommitResponse) {
+ option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit" body: "*" };
+ }
+
+ // Rolls back a transaction, releasing any locks it holds. It is a good
+ // idea to call this for any transaction that includes one or more
+ // [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and
+ // ultimately decides not to commit.
+ //
+ // `Rollback` returns `OK` if it successfully aborts the transaction, the
+ // transaction was already aborted, or the transaction is not
+ // found. `Rollback` never returns `ABORTED`.
+ rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback" body: "*" };
+ }
+}
+
+// The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
+message CreateSessionRequest {
+ // Required. The database in which the new session is created.
+ string database = 1;
+}
+
+// A session in the Cloud Spanner API.
+message Session {
+ // Required. The name of the session.
+ string name = 1;
+}
+
+// The request for [GetSession][google.spanner.v1.Spanner.GetSession].
+message GetSessionRequest {
+ // Required. The name of the session to retrieve.
+ string name = 1;
+}
+
+// The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
+message DeleteSessionRequest {
+ // Required. The name of the session to delete.
+ string name = 1;
+}
+
+// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
+// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
+message ExecuteSqlRequest {
+ // Mode in which the query must be processed.
+ enum QueryMode {
+ // The default mode, where only the query result is returned, without any
+ // information about the query plan.
+ NORMAL = 0;
+
+ // This mode returns only the query plan, without any result rows or
+ // execution statistics information.
+ PLAN = 1;
+
+ // This mode returns both the query plan and the execution statistics along
+ // with the result rows.
+ PROFILE = 2;
+ }
+
+ // Required. The session in which the SQL query should be performed.
+ string session = 1;
+
+ // The transaction to use. If none is provided, the default is a
+ // temporary read-only transaction with strong concurrency.
+ TransactionSelector transaction = 2;
+
+ // Required. The SQL query string.
+ string sql = 3;
+
+ // The SQL query string can contain parameter placeholders. A parameter
+ // placeholder consists of `'@'` followed by the parameter
+ // name. Parameter names consist of any combination of letters,
+ // numbers, and underscores.
+ //
+ // Parameters can appear anywhere that a literal value is expected. The same
+ // parameter name can be used more than once, for example:
+ // `"WHERE id > @msg_id AND id < @msg_id + 100"`
+ //
+ // It is an error to execute an SQL query with unbound parameters.
+ //
+ // Parameter values are specified using `params`, which is a JSON
+ // object whose keys are parameter names, and whose values are the
+ // corresponding parameter values.
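+ //
+ // For example, binding the `@msg_id` placeholder shown above might look
+ // like this (the table and values are illustrative, and values are encoded
+ // as described [here][google.spanner.v1.TypeCode]):
+ //
+ //     "sql": "SELECT * FROM Messages WHERE id > @msg_id",
+ //     "params": { "msg_id": "1234" }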
+ google.protobuf.Struct params = 4;
+
+ // It is not always possible for Cloud Spanner to infer the right SQL type
+ // from a JSON value. For example, values of type `BYTES` and values
+ // of type `STRING` both appear in [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
+ //
+ // In these cases, `param_types` can be used to specify the exact
+ // SQL type for some or all of the SQL query parameters. See the
+ // definition of [Type][google.spanner.v1.Type] for more information
+ // about SQL types.
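+ //
+ // For example, to declare the illustrative `@msg_id` parameter above as an
+ // `INT64`:
+ //
+ //     "param_types": { "msg_id": { "code": "INT64" } }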
+ map<string, Type> param_types = 5;
+
+ // If this request is resuming a previously interrupted SQL query
+ // execution, `resume_token` should be copied from the last
+ // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this
+ // enables the new SQL query execution to resume where the last one left
+ // off. The rest of the request parameters must exactly match the
+ // request that yielded this token.
+ bytes resume_token = 6;
+
+ // Used to control the amount of debugging information returned in
+ // [ResultSetStats][google.spanner.v1.ResultSetStats].
+ QueryMode query_mode = 7;
+}
+
+// The request for [Read][google.spanner.v1.Spanner.Read] and
+// [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
+message ReadRequest {
+ // Required. The session in which the read should be performed.
+ string session = 1;
+
+ // The transaction to use. If none is provided, the default is a
+ // temporary read-only transaction with strong concurrency.
+ TransactionSelector transaction = 2;
+
+ // Required. The name of the table in the database to be read.
+ string table = 3;
+
+ // If non-empty, the name of an index on [table][google.spanner.v1.ReadRequest.table]. This index is
+ // used instead of the table primary key when interpreting [key_set][google.spanner.v1.ReadRequest.key_set]
+ // and sorting result rows. See [key_set][google.spanner.v1.ReadRequest.key_set] for further information.
+ string index = 4;
+
+ // The columns of [table][google.spanner.v1.ReadRequest.table] to be returned for each row matching
+ // this request.
+ repeated string columns = 5;
+
+ // Required. `key_set` identifies the rows to be yielded. `key_set` names the
+ // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to be yielded, unless [index][google.spanner.v1.ReadRequest.index]
+ // is present. If [index][google.spanner.v1.ReadRequest.index] is present, then [key_set][google.spanner.v1.ReadRequest.key_set] instead names
+ // index keys in [index][google.spanner.v1.ReadRequest.index].
+ //
+ // Rows are yielded in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty)
+ // or index key order (if [index][google.spanner.v1.ReadRequest.index] is non-empty).
+ //
+ // It is not an error for the `key_set` to name rows that do not
+ // exist in the database. Read yields nothing for nonexistent rows.
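+ //
+ // For example, a read of two columns for two specific rows of a
+ // hypothetical `Users` table with a single-column primary key might
+ // specify (values illustrative):
+ //
+ //     "table": "Users",
+ //     "columns": ["UserId", "UserName"],
+ //     "key_set": { "keys": [["1"], ["2"]] }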
+ KeySet key_set = 6;
+
+ // If greater than zero, only the first `limit` rows are yielded. If `limit`
+ // is zero, the default is no limit.
+ int64 limit = 8;
+
+ // If this request is resuming a previously interrupted read,
+ // `resume_token` should be copied from the last
+ // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this
+ // enables the new read to resume where the last read left off. The
+ // rest of the request parameters must exactly match the request
+ // that yielded this token.
+ bytes resume_token = 9;
+}
+
+// The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
+message BeginTransactionRequest {
+ // Required. The session in which the transaction runs.
+ string session = 1;
+
+ // Required. Options for the new transaction.
+ TransactionOptions options = 2;
+}
+
+// The request for [Commit][google.spanner.v1.Spanner.Commit].
+message CommitRequest {
+ // Required. The session in which the transaction to be committed is running.
+ string session = 1;
+
+ // Required. The transaction in which to commit.
+ oneof transaction {
+ // Commit a previously-started transaction.
+ bytes transaction_id = 2;
+
+ // Execute mutations in a temporary transaction. Note that unlike
+ // commit of a previously-started transaction, commit with a
+ // temporary transaction is non-idempotent. That is, if the
+ // `CommitRequest` is sent to Cloud Spanner more than once (for
+ // instance, due to retries in the application, or in the
+ // transport library), it is possible that the mutations are
+ // executed more than once. If this is undesirable, use
+ // [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
+ // [Commit][google.spanner.v1.Spanner.Commit] instead.
+ TransactionOptions single_use_transaction = 3;
+ }
+
+ // The mutations to be executed when this transaction commits. All
+ // mutations are applied atomically, in the order they appear in
+ // this list.
+ repeated Mutation mutations = 4;
+}
+
+// The response for [Commit][google.spanner.v1.Spanner.Commit].
+message CommitResponse {
+ // The Cloud Spanner timestamp at which the transaction committed.
+ google.protobuf.Timestamp commit_timestamp = 1;
+}
+
+// The request for [Rollback][google.spanner.v1.Spanner.Rollback].
+message RollbackRequest {
+ // Required. The session in which the transaction to roll back is running.
+ string session = 1;
+
+ // Required. The transaction to roll back.
+ bytes transaction_id = 2;
+}
diff --git a/third_party/googleapis/google/spanner/v1/spanner_gapic.yaml b/third_party/googleapis/google/spanner/v1/spanner_gapic.yaml
new file mode 100644
index 0000000000..10341858bd
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/spanner_gapic.yaml
@@ -0,0 +1,216 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.spanner.spi.v1
+ python:
+ package_name: google.cloud.gapic.spanner.v1
+ go:
+ package_name: cloud.google.com/go/spanner/apiv1
+ csharp:
+ package_name: Google.Cloud.Spanner.V1
+ ruby:
+ package_name: Google::Cloud::Spanner::V1
+ php:
+ package_name: Google\Cloud\Spanner\V1
+ nodejs:
+ package_name: spanner.v1
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.spanner.v1.Spanner
+ collections:
+ - name_pattern: projects/{project}/instances/{instance}/databases/{database}
+ entity_name: database
+ - name_pattern: projects/{project}/instances/{instance}/databases/{database}/sessions/{session}
+ entity_name: session
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 1000
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 32000
+ initial_rpc_timeout_millis: 60000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 60000
+ total_timeout_millis: 600000
+ methods:
+ - name: CreateSession
+ flattening:
+ groups:
+ - parameters:
+ - database
+ required_fields:
+ - database
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ database: database
+ timeout_millis: 30000
+ - name: GetSession
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: session
+ timeout_millis: 30000
+ - name: DeleteSession
+ flattening:
+ groups:
+ - parameters:
+ - name
+ required_fields:
+ - name
+ request_object_method: false
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ name: session
+ timeout_millis: 30000
+ - name: ExecuteSql
+ required_fields:
+ - session
+ - sql
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ session: session
+ timeout_millis: 30000
+ - name: ExecuteStreamingSql
+ required_fields:
+ - session
+ - sql
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ session: session
+ timeout_millis: 30000
+ - name: Read
+ required_fields:
+ - session
+ - table
+ - columns
+ - key_set
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ session: session
+ timeout_millis: 30000
+ - name: StreamingRead
+ required_fields:
+ - session
+ - table
+ - columns
+ - key_set
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ session: session
+ timeout_millis: 30000
+ - name: BeginTransaction
+ flattening:
+ groups:
+ - parameters:
+ - session
+ - options
+ required_fields:
+ - session
+ - options
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ session: session
+ timeout_millis: 30000
+ - name: Commit
+ flattening:
+ groups:
+ - parameters:
+ - session
+ - transaction_id
+ - mutations
+ - parameters:
+ - session
+ - single_use_transaction
+ - mutations
+ required_fields:
+ - session
+ - mutations
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ session: session
+ timeout_millis: 30000
+ - name: Rollback
+ flattening:
+ groups:
+ - parameters:
+ - session
+ - transaction_id
+ required_fields:
+ - session
+ - transaction_id
+ request_object_method: true
+ resource_name_treatment: STATIC_TYPES
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ session: session
+ timeout_millis: 30000
+resource_name_generation:
+- message_name: CreateSessionRequest
+ field_entity_map:
+ database: database
+- message_name: Session
+ field_entity_map:
+ name: session
+- message_name: GetSessionRequest
+ field_entity_map:
+ name: session
+- message_name: DeleteSessionRequest
+ field_entity_map:
+ name: session
+- message_name: ExecuteSqlRequest
+ field_entity_map:
+ session: session
+- message_name: ReadRequest
+ field_entity_map:
+ session: session
+- message_name: BeginTransactionRequest
+ field_entity_map:
+ session: session
+- message_name: CommitRequest
+ field_entity_map:
+ session: session
+- message_name: RollbackRequest
+ field_entity_map:
+ session: session
diff --git a/third_party/googleapis/google/spanner/v1/transaction.proto b/third_party/googleapis/google/spanner/v1/transaction.proto
new file mode 100644
index 0000000000..e3ecf67894
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/transaction.proto
@@ -0,0 +1,373 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
+option java_multiple_files = true;
+option java_outer_classname = "TransactionProto";
+option java_package = "com.google.spanner.v1";
+
+
+// # Transactions
+//
+//
+// Each session can have at most one active transaction at a time. After the
+// active transaction is completed, the session can immediately be
+// re-used for the next transaction. It is not necessary to create a
+// new session for each transaction.
+//
+// # Transaction Modes
+//
+// Cloud Spanner supports two transaction modes:
+//
+// 1. Locking read-write. This type of transaction is the only way
+// to write data into Cloud Spanner. These transactions rely on
+// pessimistic locking and, if necessary, two-phase commit.
+// Locking read-write transactions may abort, requiring the
+// application to retry.
+//
+// 2. Snapshot read-only. This transaction type provides guaranteed
+// consistency across several reads, but does not allow
+// writes. Snapshot read-only transactions can be configured to
+// read at timestamps in the past. Snapshot read-only
+// transactions do not need to be committed.
+//
+// For transactions that only read, snapshot read-only transactions
+// provide simpler semantics and are almost always faster. In
+// particular, read-only transactions do not take locks, so they do
+// not conflict with read-write transactions. As a consequence of not
+// taking locks, they also do not abort, so retry loops are not needed.
+//
+// Transactions may only read/write data in a single database. They
+// may, however, read/write data in different tables within that
+// database.
+//
+// ## Locking Read-Write Transactions
+//
+// Locking transactions may be used to atomically read-modify-write
+// data anywhere in a database. This type of transaction is externally
+// consistent.
+//
+// Clients should attempt to minimize the amount of time a transaction
+// is active. Faster transactions commit with higher probability
+// and cause less contention. Cloud Spanner attempts to keep read locks
+// active as long as the transaction continues to do reads, and the
+// transaction has not been terminated by
+// [Commit][google.spanner.v1.Spanner.Commit] or
+// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of
+// inactivity at the client may cause Cloud Spanner to release a
+// transaction's locks and abort it.
+//
+// Reads performed within a transaction acquire locks on the data
+// being read. Writes can only be done at commit time, after all reads
+// have been completed.
+// Conceptually, a read-write transaction consists of zero or more
+// reads or SQL queries followed by
+// [Commit][google.spanner.v1.Spanner.Commit]. At any time before
+// [Commit][google.spanner.v1.Spanner.Commit], the client can send a
+// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the
+// transaction.
+//
+// ### Semantics
+//
+// Cloud Spanner can commit the transaction if all read locks it acquired
+// are still valid at commit time, and it is able to acquire write
+// locks for all writes. Cloud Spanner can abort the transaction for any
+// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+// that the transaction has not modified any user data in Cloud Spanner.
+//
+// Unless the transaction commits, Cloud Spanner makes no guarantees about
+// how long the transaction's locks were held for. It is an error to
+// use Cloud Spanner locks for any sort of mutual exclusion other than
+// between Cloud Spanner transactions themselves.
+//
+// ### Retrying Aborted Transactions
+//
+// When a transaction aborts, the application can choose to retry the
+// whole transaction again. To maximize the chances of successfully
+// committing the retry, the client should execute the retry in the
+// same session as the original attempt. The original session's lock
+// priority increases with each consecutive abort, meaning that each
+// attempt has a slightly better chance of success than the previous.
+//
+// Under some circumstances (e.g., many transactions attempting to
+// modify the same row(s)), a transaction can abort many times in a
+// short period before successfully committing. Thus, it is not a good
+// idea to cap the number of retries a transaction can attempt;
+// instead, it is better to limit the total amount of wall time spent
+// retrying.
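+//
+// For example, a deadline-bounded retry loop might look like the following
+// (a minimal Java sketch assuming the protoc/gRPC-generated classes under
+// `com.google.spanner.v1`, an existing `SpannerGrpc.SpannerBlockingStub stub`,
+// and a `session` name; reads and error handling are elided):
+//
+//     long deadline = System.currentTimeMillis() + 60_000;  // wall-time budget
+//     while (true) {
+//       Transaction txn = stub.beginTransaction(BeginTransactionRequest.newBuilder()
+//           .setSession(session)
+//           .setOptions(TransactionOptions.newBuilder()
+//               .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()))
+//           .build());
+//       try {
+//         // ... reads / ExecuteSql using txn.getId(), then:
+//         stub.commit(CommitRequest.newBuilder()
+//             .setSession(session)
+//             .setTransactionId(txn.getId())
+//             .build());
+//         break;  // committed
+//       } catch (io.grpc.StatusRuntimeException e) {
+//         if (e.getStatus().getCode() != io.grpc.Status.Code.ABORTED
+//             || System.currentTimeMillis() >= deadline) {
+//           throw e;  // non-retryable error, or out of time
+//         }
+//         // otherwise retry the whole transaction in the same session
+//       }
+//     }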
+//
+// ### Idle Transactions
+//
+// A transaction is considered idle if it has no outstanding reads or
+// SQL queries and has not started a read or SQL query within the last 10
+// seconds. Idle transactions can be aborted by Cloud Spanner so that they
+// don't hold on to locks indefinitely. In that case, the commit will
+// fail with error `ABORTED`.
+//
+// If this behavior is undesirable, periodically executing a simple
+// SQL query in the transaction (e.g., `SELECT 1`) prevents the
+// transaction from becoming idle.
+//
+// ## Snapshot Read-Only Transactions
+//
+// Snapshot read-only transactions provide a simpler method than
+// locking read-write transactions for doing several consistent
+// reads. However, this type of transaction does not support writes.
+//
+// Snapshot transactions do not take locks. Instead, they work by
+// choosing a Cloud Spanner timestamp, then executing all reads at that
+// timestamp. Since they do not acquire locks, they do not block
+// concurrent read-write transactions.
+//
+// Unlike locking read-write transactions, snapshot read-only
+// transactions never abort. They can fail if the chosen read
+// timestamp is garbage collected; however, the default garbage
+// collection policy is generous enough that most applications do not
+// need to worry about this in practice.
+//
+// Snapshot read-only transactions do not need to call
+// [Commit][google.spanner.v1.Spanner.Commit] or
+// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not
+// permitted to do so).
+//
+// To execute a snapshot transaction, the client specifies a timestamp
+// bound, which tells Cloud Spanner how to choose a read timestamp.
+//
+// The types of timestamp bound are:
+//
+// - Strong (the default).
+// - Bounded staleness.
+// - Exact staleness.
+//
+// If the Cloud Spanner database to be read is geographically distributed,
+// stale read-only transactions can execute more quickly than strong
+// or read-write transactions, because they are able to execute far
+// from the leader replica.
+//
+// Each type of timestamp bound is discussed in detail below.
+//
+// ### Strong
+//
+// Strong reads are guaranteed to see the effects of all transactions
+// that have committed before the start of the read. Furthermore, all
+// rows yielded by a single read are consistent with each other -- if
+// any part of the read observes a transaction, all parts of the read
+// see the transaction.
+//
+// Strong reads are not repeatable: two consecutive strong read-only
+// transactions might return inconsistent results if there are
+// concurrent writes. If consistency across reads is required, the
+// reads should be executed within a transaction or at an exact read
+// timestamp.
+//
+// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong].
+//
+// ### Exact Staleness
+//
+// These timestamp bounds execute reads at a user-specified
+// timestamp. Reads at a timestamp are guaranteed to see a consistent
+// prefix of the global transaction history: they observe
+// modifications done by all transactions with a commit timestamp <=
+// the read timestamp, and observe none of the modifications done by
+// transactions with a larger commit timestamp. They will block until
+// all conflicting transactions that may be assigned commit timestamps
+// <= the read timestamp have finished.
+//
+// The timestamp can either be expressed as an absolute Cloud Spanner commit
+// timestamp or a staleness relative to the current time.
+//
+// These modes do not require a "negotiation phase" to pick a
+// timestamp. As a result, they execute slightly faster than the
+// equivalent boundedly stale concurrency modes. On the other hand,
+// boundedly stale reads usually return fresher results.
+//
+// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and
+// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness].
+//
+// ### Bounded Staleness
+//
+// Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+// subject to a user-provided staleness bound. Cloud Spanner chooses the
+// newest timestamp within the staleness bound that allows execution
+// of the reads at the closest available replica without blocking.
+//
+// All rows yielded are consistent with each other -- if any part of
+// the read observes a transaction, all parts of the read see the
+// transaction. Boundedly stale reads are not repeatable: two stale
+// reads, even if they use the same staleness bound, can execute at
+// different timestamps and thus return inconsistent results.
+//
+// Boundedly stale reads execute in two phases: the first phase
+// negotiates a timestamp among all replicas needed to serve the
+// read. In the second phase, reads are executed at the negotiated
+// timestamp.
+//
+// As a result of the two phase execution, bounded staleness reads are
+// usually a little slower than comparable exact staleness
+// reads. However, they are typically able to return fresher
+// results, and are more likely to execute at the closest replica.
+//
+// Because the timestamp negotiation requires up-front knowledge of
+// which rows will be read, it can only be used with single-use
+// read-only transactions.
+//
+// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and
+// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp].
+//
+// ### Old Read Timestamps and Garbage Collection
+//
+// Cloud Spanner continuously garbage collects deleted and overwritten data
+// in the background to reclaim storage space. This process is known
+// as "version GC". By default, version GC reclaims versions after they
+// are one hour old. Because of this, Cloud Spanner cannot perform reads
+// at read timestamps more than one hour in the past. This
+// restriction also applies to in-progress reads and/or SQL queries whose
+// timestamps become too old while executing. Reads and SQL queries with
+// too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+message TransactionOptions {
+ // Options for read-write transactions.
+ message ReadWrite {
+
+ }
+
+ // Options for read-only transactions.
+ message ReadOnly {
+ // How to choose the timestamp for the read-only transaction.
+ oneof timestamp_bound {
+ // Read at a timestamp where all previously committed transactions
+ // are visible.
+ bool strong = 1;
+
+ // Executes all reads at a timestamp >= `min_read_timestamp`.
+ //
+ // This is useful for requesting fresher data than some previous
+ // read, or data that is fresh enough to observe the effects of some
+ // previously committed transaction whose timestamp is known.
+ //
+ // Note that this option can only be used in single-use transactions.
+ google.protobuf.Timestamp min_read_timestamp = 2;
+
+ // Read data at a timestamp >= `NOW - max_staleness`
+ // seconds. Guarantees that all writes that have committed more
+ // than the specified number of seconds ago are visible. Because
+ // Cloud Spanner chooses the exact timestamp, this mode works even if
+ // the client's local clock is substantially skewed from Cloud Spanner
+ // commit timestamps.
+ //
+ // Useful for reading the freshest data available at a nearby
+ // replica, while bounding the possible staleness if the local
+ // replica has fallen behind.
+ //
+ // Note that this option can only be used in single-use
+ // transactions.
+ google.protobuf.Duration max_staleness = 3;
+
+ // Executes all reads at the given timestamp. Unlike other modes,
+ // reads at a specific timestamp are repeatable; the same read at
+ // the same timestamp always returns the same data. If the
+ // timestamp is in the future, the read will block until the
+ // specified timestamp, modulo the read's deadline.
+ //
+ // Useful for large scale consistent reads such as mapreduces, or
+ // for coordinating many reads against a consistent snapshot of the
+ // data.
+ google.protobuf.Timestamp read_timestamp = 4;
+
+ // Executes all reads at a timestamp that is `exact_staleness`
+ // old. The timestamp is chosen soon after the read is started.
+ //
+ // Guarantees that all writes that have committed more than the
+ // specified number of seconds ago are visible. Because Cloud Spanner
+ // chooses the exact timestamp, this mode works even if the client's
+ // local clock is substantially skewed from Cloud Spanner commit
+ // timestamps.
+ //
+ // Useful for reading at nearby replicas without the distributed
+ // timestamp negotiation overhead of `max_staleness`.
+ google.protobuf.Duration exact_staleness = 5;
+ }
+
+ // If true, the Cloud Spanner-selected read timestamp is included in
+ // the [Transaction][google.spanner.v1.Transaction] message that describes the transaction.
+ bool return_read_timestamp = 6;
+ }
+
+ // Required. The type of transaction.
+ oneof mode {
+ // Transaction may write.
+ //
+ // Authorization to begin a read-write transaction requires
+ // `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+ // on the `session` resource.
+ ReadWrite read_write = 1;
+
+ // Transaction will not write.
+ //
+ // Authorization to begin a read-only transaction requires
+ // `spanner.databases.beginReadOnlyTransaction` permission
+ // on the `session` resource.
+ ReadOnly read_only = 2;
+ }
+}
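+//
+// Example (Java): constructing `TransactionOptions` for the modes described
+// above, as a minimal sketch with the protoc-generated builders under
+// `com.google.spanner.v1` (the 15-second bound is an arbitrary placeholder):
+//
+//     import com.google.protobuf.Duration;
+//     import com.google.spanner.v1.TransactionOptions;
+//
+//     // Locking read-write transaction.
+//     TransactionOptions readWrite = TransactionOptions.newBuilder()
+//         .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance())
+//         .build();
+//
+//     // Strong read-only transaction that also reports its read timestamp.
+//     TransactionOptions strongRead = TransactionOptions.newBuilder()
+//         .setReadOnly(TransactionOptions.ReadOnly.newBuilder()
+//             .setStrong(true)
+//             .setReturnReadTimestamp(true))
+//         .build();
+//
+//     // Bounded-staleness read-only transaction: data at most 15 seconds old
+//     // (single-use only, per the field comment above).
+//     TransactionOptions boundedStale = TransactionOptions.newBuilder()
+//         .setReadOnly(TransactionOptions.ReadOnly.newBuilder()
+//             .setMaxStaleness(Duration.newBuilder().setSeconds(15)))
+//         .build();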
+
+// A transaction.
+message Transaction {
+ // `id` may be used to identify the transaction in subsequent
+ // [Read][google.spanner.v1.Spanner.Read],
+ // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
+ // [Commit][google.spanner.v1.Spanner.Commit], or
+ // [Rollback][google.spanner.v1.Spanner.Rollback] calls.
+ //
+ // Single-use read-only transactions do not have IDs, because
+ // single-use transactions do not support multiple requests.
+ bytes id = 1;
+
+ // For snapshot read-only transactions, the read timestamp chosen
+ // for the transaction. Not returned by default: see
+ // [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
+ google.protobuf.Timestamp read_timestamp = 2;
+}
+
+// This message is used to select the transaction in which a
+// [Read][google.spanner.v1.Spanner.Read] or
+// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs.
+//
+// See [TransactionOptions][google.spanner.v1.TransactionOptions] for more information about transactions.
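+//
+// Example (Java): the three selector forms, sketched with the generated
+// builders under `com.google.spanner.v1` (the transaction id is a placeholder):
+//
+//     import com.google.protobuf.ByteString;
+//     import com.google.spanner.v1.TransactionOptions;
+//     import com.google.spanner.v1.TransactionSelector;
+//
+//     // Temporary single-use transaction (also the default when unset).
+//     TransactionSelector singleUse = TransactionSelector.newBuilder()
+//         .setSingleUse(TransactionOptions.newBuilder()
+//             .setReadOnly(TransactionOptions.ReadOnly.newBuilder().setStrong(true)))
+//         .build();
+//
+//     // Run in a previously-started transaction.
+//     TransactionSelector existing = TransactionSelector.newBuilder()
+//         .setId(ByteString.copyFromUtf8("previously-started-transaction-id"))
+//         .build();
+//
+//     // Begin a new transaction and run this read or query in it.
+//     TransactionSelector begin = TransactionSelector.newBuilder()
+//         .setBegin(TransactionOptions.newBuilder()
+//             .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()))
+//         .build();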
+message TransactionSelector {
+ // If no fields are set, the default is a single use transaction
+ // with strong concurrency.
+ oneof selector {
+ // Execute the read or SQL query in a temporary transaction.
+ // This is the most efficient way to execute a transaction that
+ // consists of a single SQL query.
+ TransactionOptions single_use = 1;
+
+ // Execute the read or SQL query in a previously-started transaction.
+ bytes id = 2;
+
+ // Begin a new transaction and execute this read or SQL query in
+ // it. The transaction ID of the new transaction is returned in
+ // [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction], which is a [Transaction][google.spanner.v1.Transaction].
+ TransactionOptions begin = 3;
+ }
+}
diff --git a/third_party/googleapis/google/spanner/v1/type.proto b/third_party/googleapis/google/spanner/v1/type.proto
new file mode 100644
index 0000000000..76a941ff74
--- /dev/null
+++ b/third_party/googleapis/google/spanner/v1/type.proto
@@ -0,0 +1,111 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.v1;
+
+import "google/api/annotations.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.V1";
+option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
+option java_multiple_files = true;
+option java_outer_classname = "TypeProto";
+option java_package = "com.google.spanner.v1";
+
+
+// `Type` indicates the type of a Cloud Spanner value, as might be stored in a
+// table cell or returned from an SQL query.
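+//
+// Example (Java): describing the type `ARRAY<STRUCT<word STRING, len INT64>>`
+// with the generated builders under `com.google.spanner.v1` (a minimal
+// sketch; the field names are placeholders):
+//
+//     import com.google.spanner.v1.StructType;
+//     import com.google.spanner.v1.Type;
+//     import com.google.spanner.v1.TypeCode;
+//
+//     Type rowType = Type.newBuilder()
+//         .setCode(TypeCode.STRUCT)
+//         .setStructType(StructType.newBuilder()
+//             .addFields(StructType.Field.newBuilder()
+//                 .setName("word")
+//                 .setType(Type.newBuilder().setCode(TypeCode.STRING)))
+//             .addFields(StructType.Field.newBuilder()
+//                 .setName("len")
+//                 .setType(Type.newBuilder().setCode(TypeCode.INT64))))
+//         .build();
+//
+//     Type arrayOfRows = Type.newBuilder()
+//         .setCode(TypeCode.ARRAY)
+//         .setArrayElementType(rowType)
+//         .build();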
+message Type {
+ // Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
+ TypeCode code = 1;
+
+ // If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
+ // is the type of the array elements.
+ Type array_element_type = 2;
+
+ // If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
+ // provides type information for the struct's fields.
+ StructType struct_type = 3;
+}
+
+// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type.
+message StructType {
+ // Message representing a single field of a struct.
+ message Field {
+ // The name of the field. For reads, this is the column name. For
+ // SQL queries, it is the column alias (e.g., `"Word"` in the
+ // query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
+ // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
+ // columns might have an empty name (e.g., `"SELECT
+ // UPPER(ColName)"`). Note that a query result can contain
+ // multiple fields with the same name.
+ string name = 1;
+
+ // The type of the field.
+ Type type = 2;
+ }
+
+ // The list of fields that make up this struct. Order is
+ // significant, because values of this struct type are represented as
+ // lists, where the order of field values matches the order of
+ // fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields
+ // matches the order of columns in a read request, or the order of
+ // fields in the `SELECT` clause of a query.
+ repeated Field fields = 1;
+}
+
+// `TypeCode` is used as part of [Type][google.spanner.v1.Type] to
+// indicate the type of a Cloud Spanner value.
+//
+// Each legal value of a type can be encoded to or decoded from a JSON
+// value, using the encodings described below. All Cloud Spanner values can
+// be `null`, regardless of type; `null`s are always encoded as a JSON
+// `null`.
+enum TypeCode {
+ // Not specified.
+ TYPE_CODE_UNSPECIFIED = 0;
+
+ // Encoded as JSON `true` or `false`.
+ BOOL = 1;
+
+ // Encoded as `string`, in decimal format.
+ INT64 = 2;
+
+ // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
+ // `"-Infinity"`.
+ FLOAT64 = 3;
+
+ // Encoded as `string` in RFC 3339 timestamp format. The time zone
+ // must be present, and must be `"Z"`.
+ TIMESTAMP = 4;
+
+ // Encoded as `string` in RFC 3339 date format.
+ DATE = 5;
+
+ // Encoded as `string`.
+ STRING = 6;
+
+ // Encoded as a base64-encoded `string`, as described in RFC 4648,
+ // section 4.
+ BYTES = 7;
+
+ // Encoded as `list`, where the list elements are represented
+ // according to [array_element_type][google.spanner.v1.Type.array_element_type].
+ ARRAY = 8;
+
+ // Encoded as `list`, where list element `i` is represented according
+ // to [struct_type.fields[i]][google.spanner.v1.StructType.fields].
+ STRUCT = 9;
+}
diff --git a/third_party/googleapis/google/storagetransfer/v1/transfer.proto b/third_party/googleapis/google/storagetransfer/v1/transfer.proto
new file mode 100644
index 0000000000..5b11438dd2
--- /dev/null
+++ b/third_party/googleapis/google/storagetransfer/v1/transfer.proto
@@ -0,0 +1,168 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.storagetransfer.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/storagetransfer/v1/transfer_types.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/storagetransfer/v1;storagetransfer";
+option java_outer_classname = "TransferProto";
+option java_package = "com.google.storagetransfer.v1.proto";
+
+
+// Transfers data between Google Cloud Storage buckets or from a data
+// source external to Google to a Cloud Storage bucket.
+service StorageTransferService {
+ // Returns the Google service account that is used by Storage Transfer
+ // Service to access buckets in the project where transfers
+ // run or in other projects. Each Google service account is associated
+ // with one Google Cloud Platform Console project. Users
+ // should add this service account to the Google Cloud Storage bucket
+ // ACLs to grant access to Storage Transfer Service. This service
+ // account is created and owned by Storage Transfer Service and can
+ // only be used by Storage Transfer Service.
+ rpc GetGoogleServiceAccount(GetGoogleServiceAccountRequest) returns (GoogleServiceAccount) {
+ option (google.api.http) = { get: "/v1/googleServiceAccounts/{project_id}" };
+ }
+
+ // Creates a transfer job that runs periodically.
+ rpc CreateTransferJob(CreateTransferJobRequest) returns (TransferJob) {
+ option (google.api.http) = { post: "/v1/transferJobs" body: "transfer_job" };
+ }
+
+ // Updates a transfer job. Updating a job's transfer spec does not affect
+ // transfer operations that are running already. Updating the scheduling
+ // of a job is not allowed.
+ rpc UpdateTransferJob(UpdateTransferJobRequest) returns (TransferJob) {
+ option (google.api.http) = { patch: "/v1/{job_name=transferJobs/**}" body: "*" };
+ }
+
+ // Gets a transfer job.
+ rpc GetTransferJob(GetTransferJobRequest) returns (TransferJob) {
+ option (google.api.http) = { get: "/v1/{job_name=transferJobs/**}" };
+ }
+
+ // Lists transfer jobs.
+ rpc ListTransferJobs(ListTransferJobsRequest) returns (ListTransferJobsResponse) {
+ option (google.api.http) = { get: "/v1/transferJobs" };
+ }
+
+ // Pauses a transfer operation.
+ rpc PauseTransferOperation(PauseTransferOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=transferOperations/**}:pause" body: "*" };
+ }
+
+ // Resumes a transfer operation that is paused.
+ rpc ResumeTransferOperation(ResumeTransferOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=transferOperations/**}:resume" body: "*" };
+ }
+}
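+//
+// For example, with the binding above, `CreateTransferJob` is reachable over
+// HTTP/JSON as roughly the following request (the JSON body is illustrative;
+// it carries the `transfer_job` field in the standard proto3 JSON mapping):
+//
+//     POST /v1/transferJobs HTTP/1.1
+//     Content-Type: application/json
+//
+//     {
+//       "description": "Nightly copy",
+//       "projectId": "my-project-id",
+//       "transferSpec": { ... },
+//       "schedule": { ... },
+//       "status": "ENABLED"
+//     }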
+
+// Request passed to GetGoogleServiceAccount.
+message GetGoogleServiceAccountRequest {
+ // The ID of the Google Cloud Platform Console project that the Google service
+ // account is associated with.
+ // Required.
+ string project_id = 1;
+}
+
+// Request passed to CreateTransferJob.
+message CreateTransferJobRequest {
+ // The job to create.
+ // Required.
+ TransferJob transfer_job = 1;
+}
+
+// Request passed to UpdateTransferJob.
+message UpdateTransferJobRequest {
+ // The name of job to update.
+ // Required.
+ string job_name = 1;
+
+ // The ID of the Google Cloud Platform Console project that owns the job.
+ // Required.
+ string project_id = 2;
+
+ // The job to update.
+ // Required.
+ TransferJob transfer_job = 3;
+
+ // The field mask of the fields in `transferJob` that are to be updated in
+ // this request. Fields in `transferJob` that can be updated are:
+ // `description`, `transferSpec`, and `status`. To update the `transferSpec`
+ // of the job, a complete transfer specification has to be provided. An
+ // incomplete specification which misses any required fields will be rejected
+ // with the error `INVALID_ARGUMENT`.
+ google.protobuf.FieldMask update_transfer_job_field_mask = 4;
+}
+
+// Request passed to GetTransferJob.
+message GetTransferJobRequest {
+ // The job to get.
+ // Required.
+ string job_name = 1;
+
+ // The ID of the Google Cloud Platform Console project that owns the job.
+ // Required.
+ string project_id = 2;
+}
+
+// `project_id`, `job_names`, and `job_statuses` are query parameters that can
+// be specified when listing transfer jobs.
+message ListTransferJobsRequest {
+ // A list of query parameters specified as JSON text in the form of
+ // {"project_id":"my_project_id",
+ // "job_names":["jobid1","jobid2",...],
+ // "job_statuses":["status1","status2",...]}.
+ // Since `job_names` and `job_statuses` support multiple values, their values
+ // must be specified with array notation. `project_id` is required. `job_names`
+ // and `job_statuses` are optional. The valid values for `job_statuses` are
+ // case-insensitive: `ENABLED`, `DISABLED`, and `DELETED`.
+ string filter = 1;
+
+ // The list page size. The max allowed value is 256.
+ int32 page_size = 4;
+
+ // The list page token.
+ string page_token = 5;
+}
+
+// Response from ListTransferJobs.
+message ListTransferJobsResponse {
+ // A list of transfer jobs.
+ repeated TransferJob transfer_jobs = 1;
+
+ // The list next page token.
+ string next_page_token = 2;
+}
+
+// Request passed to PauseTransferOperation.
+message PauseTransferOperationRequest {
+ // The name of the transfer operation.
+ // Required.
+ string name = 1;
+}
+
+// Request passed to ResumeTransferOperation.
+message ResumeTransferOperationRequest {
+ // The name of the transfer operation.
+ // Required.
+ string name = 1;
+}
diff --git a/third_party/googleapis/google/storagetransfer/v1/transfer_types.proto b/third_party/googleapis/google/storagetransfer/v1/transfer_types.proto
new file mode 100644
index 0000000000..eca7686c4b
--- /dev/null
+++ b/third_party/googleapis/google/storagetransfer/v1/transfer_types.proto
@@ -0,0 +1,443 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.storagetransfer.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/code.proto";
+import "google/type/date.proto";
+import "google/type/timeofday.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/storagetransfer/v1;storagetransfer";
+option java_outer_classname = "TransferTypes";
+option java_package = "com.google.storagetransfer.v1.proto";
+
+
+// Google service account
+message GoogleServiceAccount {
+ // Required.
+ string account_email = 1;
+}
+
+// AWS access key (see
+// [AWS Security Credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)).
+message AwsAccessKey {
+ // AWS access key ID.
+ // Required.
+ string access_key_id = 1;
+
+ // AWS secret access key. This field is not returned in RPC responses.
+ // Required.
+ string secret_access_key = 2;
+}
+
+// Conditions that determine which objects will be transferred.
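+//
+// Example (Java): "only transfer objects under `logs/` (but not `logs/tmp/`)
+// that were last modified between 30 days ago and 1 day ago", sketched with
+// the generated builders (nested in `TransferTypes`, since
+// `java_multiple_files` is not set):
+//
+//     import com.google.protobuf.Duration;
+//     import com.google.storagetransfer.v1.proto.TransferTypes.ObjectConditions;
+//
+//     ObjectConditions conditions = ObjectConditions.newBuilder()
+//         // lastModificationTime >= NOW - 30 days
+//         .setMaxTimeElapsedSinceLastModification(
+//             Duration.newBuilder().setSeconds(30L * 24 * 60 * 60))
+//         // lastModificationTime < NOW - 1 day
+//         .setMinTimeElapsedSinceLastModification(
+//             Duration.newBuilder().setSeconds(24L * 60 * 60))
+//         .addIncludePrefixes("logs/")
+//         .addExcludePrefixes("logs/tmp/")
+//         .build();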
+message ObjectConditions {
+ // If unspecified, `minTimeElapsedSinceLastModification` takes a zero value
+ // and `maxTimeElapsedSinceLastModification` takes the maximum possible
+ // value of Duration. Objects that satisfy the object conditions
+ // must either have a `lastModificationTime` greater than or equal to
+ // `NOW` - `maxTimeElapsedSinceLastModification` and less than
+ // `NOW` - `minTimeElapsedSinceLastModification`, or not have a
+ // `lastModificationTime`.
+ google.protobuf.Duration min_time_elapsed_since_last_modification = 1;
+
+ // `maxTimeElapsedSinceLastModification` is the complement to
+ // `minTimeElapsedSinceLastModification`.
+ google.protobuf.Duration max_time_elapsed_since_last_modification = 2;
+
+ // If `includePrefixes` is specified, objects that satisfy the object
+ // conditions must have names that start with one of the `includePrefixes`
+ // and that do not start with any of the `excludePrefixes`. If `includePrefixes`
+ // is not specified, all objects except those that have names starting with
+ // one of the `excludePrefixes` must satisfy the object conditions.
+ //
+ // Requirements:
+ //
+ // * Each include-prefix and exclude-prefix can contain any sequence of
+ // Unicode characters, of max length 1024 bytes when UTF8-encoded, and
+ // must not contain Carriage Return or Line Feed characters. Wildcard
+ // matching and regular expression matching are not supported.
+ //
+ // * Each include-prefix and exclude-prefix must omit the leading slash.
+ // For example, to include the `requests.gz` object in a transfer from
+ // `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the include
+ // prefix as `logs/y=2015/requests.gz`.
+ //
+ // * None of the include-prefix or the exclude-prefix values can be empty,
+ // if specified.
+ //
+ // * Each include-prefix must include a distinct portion of the object
+ // namespace, i.e., no include-prefix may be a prefix of another
+ // include-prefix.
+ //
+ // * Each exclude-prefix must exclude a distinct portion of the object
+ // namespace, i.e., no exclude-prefix may be a prefix of another
+ // exclude-prefix.
+ //
+ // * If `includePrefixes` is specified, then each exclude-prefix must start
+ // with the value of a path explicitly included by `includePrefixes`.
+ //
+ // The max size of `includePrefixes` is 20.
+ repeated string include_prefixes = 3;
+
+ // `excludePrefixes` must follow the requirements described for
+ // `includePrefixes`.
+ //
+ // The max size of `excludePrefixes` is 20.
+ repeated string exclude_prefixes = 4;
+}
+
+// In a GcsData, an object's name is the Google Cloud Storage object's name and
+// its `lastModificationTime` refers to the object's updated time, which changes
+// when the content or the metadata of the object is updated.
+message GcsData {
+ // Google Cloud Storage bucket name (see
+ // [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).
+ // Required.
+ string bucket_name = 1;
+}
+
+// An AwsS3Data can be a data source, but not a data sink.
+// In an AwsS3Data, an object's name is the S3 object's key name.
+message AwsS3Data {
+ // S3 Bucket name (see
+ // [Creating a bucket](http://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)).
+ // Required.
+ string bucket_name = 1;
+
+ // AWS access key used to sign the API requests to the AWS S3 bucket.
+ // Permissions on the bucket must be granted to the access ID of the
+ // AWS access key.
+ // Required.
+ AwsAccessKey aws_access_key = 2;
+}
+
+// An HttpData specifies a list of objects on the web to be transferred over
+// HTTP. The information of the objects to be transferred is contained in a
+// file referenced by a URL. The first line in the file must be
+// "TsvHttpData-1.0", which specifies the format of the file. Subsequent lines
+// specify the information of the list of objects, one object per list entry.
+// Each entry has the following tab-delimited fields:
+//
+// * HTTP URL - The location of the object.
+//
+// * Length - The size of the object in bytes.
+//
+// * MD5 - The base64-encoded MD5 hash of the object.
+//
+// For an example of a valid TSV file, see
+// [Transferring data from URLs](https://cloud.google.com/storage/transfer/#urls)
+//
+// When transferring data based on a URL list, keep the following in mind:
+//
+// * When an object located at `http(s)://hostname:port/<URL-path>` is transferred
+// to a data sink, the name of the object at the data sink is
+// `<hostname>/<URL-path>`.
+//
+// * If the specified size of an object does not match the actual size of the
+// object fetched, the object will not be transferred.
+//
+// * If the specified MD5 does not match the MD5 computed from the transferred
+// bytes, the object transfer will fail. For more information, see
+// [Generating MD5 hashes](https://cloud.google.com/storage/transfer/#md5)
+//
+// * Ensure that each URL you specify is publicly accessible. For
+// example, in Google Cloud Storage you can
+// [share an object publicly]
+// (https://cloud.google.com/storage/docs/cloud-console#_sharingdata) and get
+// a link to it.
+//
+// * Storage Transfer Service obeys `robots.txt` rules and requires the source
+// HTTP server to support `Range` requests and to return a `Content-Length`
+// header in each response.
+//
+// * [ObjectConditions](#ObjectConditions) have no effect when filtering objects
+// to transfer.
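+//
+// For example (the URLs, sizes, and hashes below are placeholders), a
+// two-object list file would look like this, with the fields of each entry
+// separated by tab characters (written as `<TAB>` here):
+//
+//     TsvHttpData-1.0
+//     https://example.com/data/file1.csv<TAB>1357<TAB><base64 MD5 of file1.csv>
+//     https://example.com/data/file2.csv<TAB>4096<TAB><base64 MD5 of file2.csv>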
+message HttpData {
+ // The URL that points to the file that stores the object list entries.
+ // This file must allow public access. Currently, only URLs with HTTP and
+ // HTTPS schemes are supported.
+ // Required.
+ string list_url = 1;
+}
+
+// TransferOptions uses three boolean parameters to define the actions
+// to be performed on objects in a transfer.
+message TransferOptions {
+ // Whether overwriting objects that already exist in the sink is allowed.
+ bool overwrite_objects_already_existing_in_sink = 1;
+
+ // Whether objects that exist only in the sink should be deleted.
+ bool delete_objects_unique_in_sink = 2;
+
+ // Whether objects should be deleted from the source after they are
+ // transferred to the sink.
+ bool delete_objects_from_source_after_transfer = 3;
+}
+
+// Configuration for running a transfer.
+message TransferSpec {
+ // The read source of the data.
+ oneof data_source {
+ // A Google Cloud Storage data source.
+ GcsData gcs_data_source = 1;
+
+ // An AWS S3 data source.
+ AwsS3Data aws_s3_data_source = 2;
+
+ // An HTTP URL data source.
+ HttpData http_data_source = 3;
+ }
+
+ // The write sink for the data.
+ oneof data_sink {
+ // A Google Cloud Storage data sink.
+ GcsData gcs_data_sink = 4;
+ }
+
+ // Only objects that satisfy these object conditions are included in the set
+ // of data source and data sink objects. Object conditions based on
+ // objects' `lastModificationTime` do not exclude objects in a data sink.
+ ObjectConditions object_conditions = 5;
+
+ // If the option `deleteObjectsUniqueInSink` is `true`, object conditions
+ // based on objects' `lastModificationTime` are ignored and do not exclude
+ // objects in a data source or a data sink.
+ TransferOptions transfer_options = 6;
+}
+
+// Transfers can be scheduled to recur or to run just once.
+message Schedule {
+ // The first day the recurring transfer is scheduled to run. If
+ // `scheduleStartDate` is in the past, the transfer will run for the first
+ // time on the following day.
+ // Required.
+ google.type.Date schedule_start_date = 1;
+
+ // The last day the recurring transfer will be run. If `scheduleEndDate`
+ // is the same as `scheduleStartDate`, the transfer will be executed only
+ // once.
+ google.type.Date schedule_end_date = 2;
+
+ // The time in UTC at which the transfer will be scheduled to start in a day.
+ // Transfers may start later than this time. If not specified, recurring and
+ // one-time transfers that are scheduled to run today will run immediately;
+ // recurring transfers that are scheduled to run on a future date will start
+ // at approximately midnight UTC on that date. Note that when configuring a
+ // transfer with the Cloud Platform Console, the transfer's start time in a
+ // day is specified in your local timezone.
+ google.type.TimeOfDay start_time_of_day = 3;
+}
+
+// This resource represents the configuration of a transfer job that runs
+// periodically.
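+//
+// Example (Java): a nightly GCS-to-GCS job, sketched with the generated
+// builders (nested in `TransferTypes`, since `java_multiple_files` is not
+// set); the bucket names, project ID, and dates are placeholders.
+//
+//     import com.google.storagetransfer.v1.proto.TransferTypes.GcsData;
+//     import com.google.storagetransfer.v1.proto.TransferTypes.Schedule;
+//     import com.google.storagetransfer.v1.proto.TransferTypes.TransferJob;
+//     import com.google.storagetransfer.v1.proto.TransferTypes.TransferOptions;
+//     import com.google.storagetransfer.v1.proto.TransferTypes.TransferSpec;
+//     import com.google.type.Date;
+//     import com.google.type.TimeOfDay;
+//
+//     TransferJob job = TransferJob.newBuilder()
+//         // `name` is left empty; the service assigns it on creation.
+//         .setDescription("Nightly copy of my-source-bucket")
+//         .setProjectId("my-project-id")
+//         .setTransferSpec(TransferSpec.newBuilder()
+//             .setGcsDataSource(GcsData.newBuilder().setBucketName("my-source-bucket"))
+//             .setGcsDataSink(GcsData.newBuilder().setBucketName("my-sink-bucket"))
+//             .setTransferOptions(TransferOptions.newBuilder()
+//                 .setOverwriteObjectsAlreadyExistingInSink(true)))
+//         .setSchedule(Schedule.newBuilder()
+//             .setScheduleStartDate(Date.newBuilder().setYear(2017).setMonth(6).setDay(1))
+//             .setStartTimeOfDay(TimeOfDay.newBuilder().setHours(2)))
+//         .setStatus(TransferJob.Status.ENABLED)
+//         .build();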
+message TransferJob {
+ // The status of the transfer job.
+ enum Status {
+ // Zero is an illegal value.
+ STATUS_UNSPECIFIED = 0;
+
+ // New transfers will be performed based on the schedule.
+ ENABLED = 1;
+
+ // New transfers will not be scheduled.
+ DISABLED = 2;
+
+ // This is a soft delete state. After a transfer job is set to this
+ // state, the job and all the transfer executions are subject to
+ // garbage collection.
+ DELETED = 3;
+ }
+
+ // A globally unique name assigned by Storage Transfer Service when the
+ // job is created. This field should be left empty in requests to create a new
+ // transfer job; otherwise, the requests result in an `INVALID_ARGUMENT`
+ // error.
+ string name = 1;
+
+ // A description provided by the user for the job. Its max length is 1024
+ // bytes when Unicode-encoded.
+ string description = 2;
+
+ // The ID of the Google Cloud Platform Console project that owns the job.
+ // Required.
+ string project_id = 3;
+
+ // Transfer specification.
+ // Required.
+ TransferSpec transfer_spec = 4;
+
+ // Schedule specification.
+ // Required.
+ Schedule schedule = 5;
+
+ // Status of the job. This value MUST be specified for
+ // `CreateTransferJobRequests`.
+ //
+ // NOTE: The effect of the new job status takes place during a subsequent job
+ // run. For example, if you change the job status from `ENABLED` to
+ // `DISABLED`, and an operation spawned by the transfer is running, the status
+ // change would not affect the current operation.
+ Status status = 6;
+
+ // This field cannot be changed by user requests.
+ google.protobuf.Timestamp creation_time = 7;
+
+ // This field cannot be changed by user requests.
+ google.protobuf.Timestamp last_modification_time = 8;
+
+ // This field cannot be changed by user requests.
+ google.protobuf.Timestamp deletion_time = 9;
+}
+
+// An entry describing an error that has occurred.
+message ErrorLogEntry {
+ // A URL that refers to the target (a data source, a data sink,
+ // or an object) with which the error is associated.
+ // Required.
+ string url = 1;
+
+ // A list of messages that carry the error details.
+ repeated string error_details = 3;
+}
+
+// A summary of errors by error code, plus a count and sample error log
+// entries.
+message ErrorSummary {
+ // Required.
+ google.rpc.Code error_code = 1;
+
+ // Count of this type of error.
+ // Required.
+ int64 error_count = 2;
+
+ // Error samples.
+ repeated ErrorLogEntry error_log_entries = 3;
+}
+
+// A collection of counters that report the progress of a transfer operation.
+message TransferCounters {
+ // Objects found in the data source that are scheduled to be transferred,
+ // which will be copied, excluded based on conditions, or skipped due to
+ // failures.
+ int64 objects_found_from_source = 1;
+
+ // Bytes found in the data source that are scheduled to be transferred,
+ // which will be copied, excluded based on conditions, or skipped due to
+ // failures.
+ int64 bytes_found_from_source = 2;
+
+ // Objects found only in the data sink that are scheduled to be deleted.
+ int64 objects_found_only_from_sink = 3;
+
+ // Bytes found only in the data sink that are scheduled to be deleted.
+ int64 bytes_found_only_from_sink = 4;
+
+ // Objects in the data source that are not transferred because they already
+ // exist in the data sink.
+ int64 objects_from_source_skipped_by_sync = 5;
+
+ // Bytes in the data source that are not transferred because they already
+ // exist in the data sink.
+ int64 bytes_from_source_skipped_by_sync = 6;
+
+ // Objects that are copied to the data sink.
+ int64 objects_copied_to_sink = 7;
+
+ // Bytes that are copied to the data sink.
+ int64 bytes_copied_to_sink = 8;
+
+ // Objects that are deleted from the data source.
+ int64 objects_deleted_from_source = 9;
+
+ // Bytes that are deleted from the data source.
+ int64 bytes_deleted_from_source = 10;
+
+ // Objects that are deleted from the data sink.
+ int64 objects_deleted_from_sink = 11;
+
+ // Bytes that are deleted from the data sink.
+ int64 bytes_deleted_from_sink = 12;
+
+ // Objects in the data source that failed during the transfer.
+ int64 objects_from_source_failed = 13;
+
+ // Bytes in the data source that failed during the transfer.
+ int64 bytes_from_source_failed = 14;
+
+ // Objects that failed to be deleted from the data sink.
+ int64 objects_failed_to_delete_from_sink = 15;
+
+ // Bytes that failed to be deleted from the data sink.
+ int64 bytes_failed_to_delete_from_sink = 16;
+}
+
+// A description of the execution of a transfer.
+message TransferOperation {
+ // The status of a TransferOperation.
+ enum Status {
+ // Zero is an illegal value.
+ STATUS_UNSPECIFIED = 0;
+
+ // In progress.
+ IN_PROGRESS = 1;
+
+ // Paused.
+ PAUSED = 2;
+
+ // Completed successfully.
+ SUCCESS = 3;
+
+ // Terminated due to an unrecoverable failure.
+ FAILED = 4;
+
+ // Aborted by the user.
+ ABORTED = 5;
+ }
+
+ // A globally unique ID assigned by the system.
+ string name = 1;
+
+ // The ID of the Google Cloud Platform Console project that owns the operation.
+ // Required.
+ string project_id = 2;
+
+ // Transfer specification.
+ // Required.
+ TransferSpec transfer_spec = 3;
+
+ // Start time of this transfer execution.
+ google.protobuf.Timestamp start_time = 4;
+
+ // End time of this transfer execution.
+ google.protobuf.Timestamp end_time = 5;
+
+ // Status of the transfer operation.
+ Status status = 6;
+
+ // Information about the progress of the transfer operation.
+ TransferCounters counters = 7;
+
+ // Summarizes errors encountered with sample error log entries.
+ repeated ErrorSummary error_breakdowns = 8;
+
+ // The name of the transfer job that triggers this transfer operation.
+ string transfer_job_name = 9;
+}
diff --git a/third_party/googleapis/google/tracing/trace.proto b/third_party/googleapis/google/tracing/trace.proto
new file mode 100644
index 0000000000..dc10a90e58
--- /dev/null
+++ b/third_party/googleapis/google/tracing/trace.proto
@@ -0,0 +1,247 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.tracing.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/tracing/v1;tracing";
+option java_multiple_files = true;
+option java_outer_classname = "TraceProto";
+option java_package = "com.google.tracing.v1";
+
+
+// A TraceId uniquely identifies a Trace. It is conceptually a 128-bit value,
+// represented as a string, containing the hex-encoded value.
+message TraceId {
+ // Trace ID specified as a hex-encoded string. *Must* be 32 bytes long.
+ string hex_encoded = 1;
+}
+
+message Module {
+ // Binary module.
+ // E.g. main binary, kernel modules, and dynamic libraries
+ // such as libc.so, sharedlib.so
+ string module = 1;
+
+ // `build_id` is a unique identifier for the module,
+ // typically a hash of its contents.
+ string build_id = 2;
+}
+
+message StackTrace {
+ // Presents a single stack frame in a stack trace.
+ message StackFrame {
+ // The fully qualified name that uniquely identifies the function/method/etc.
+ string function_name = 1;
+
+ // Used when the function name is mangled. Not guaranteed to be fully
+ // qualified, but it usually is.
+ string orig_function_name = 2;
+
+ // File name of the frame.
+ string file_name = 3;
+
+ // Line number of the frame.
+ int64 line_number = 4;
+
+ // Column number is important in JavaScript (anonymous functions);
+ // it might not be available in some languages.
+ int64 column_number = 5;
+
+ // Binary module the code is loaded from.
+ Module load_module = 6;
+
+ // source_version is deployment-specific. It might be better stored in
+ // deployment metadata; however, in distributed tracing it is hard to keep
+ // track of source/binary versions in one place for all spans.
+ string source_version = 7;
+ }
+
+ // Stack frames of this stack trace.
+ repeated StackFrame stack_frame = 1;
+
+ // The user can choose to use their own hash function to hash large stack
+ // traces, saving network bandwidth and storage. Typical usage is to pass
+ // both the stack frames and the hash initially, to inform the storage of
+ // the mapping, and to pass only `stack_trace_hash_id` in subsequent calls.
+ // The user should verify that the hash value was stored successfully.
+ uint64 stack_trace_hash_id = 2;
+}
+
+// Allowed label values.
+message LabelValue {
+ // The value of the label.
+ oneof value {
+ // A string value.
+ string string_value = 1;
+
+ // An integer value.
+ int64 int_value = 2;
+
+ // A boolean value.
+ bool bool_value = 3;
+ }
+}
+
+// A span represents a single operation within a trace. Spans can be nested
+// and form a trace tree. Often, a trace contains a root span that describes the
+// end-to-end latency and, optionally, one or more subspans for
+// its sub-operations. Spans do not need to be contiguous. There may be gaps
+// between spans in a trace.
+message Span {
+ // A time-stamped annotation in the Span.
+ message TimeEvent {
+ // Text annotation with a set of labels.
+ message Annotation {
+ // A user-supplied message describing the event.
+ string description = 1;
+
+ // A set of labels on the annotation.
+ map<string, LabelValue> labels = 2;
+ }
+
+ // An event describing an RPC message sent/received on the network.
+ message NetworkEvent {
+ // The type of the network event. SENT or RECV event.
+ enum Type {
+ UNSPECIFIED = 0;
+
+ SENT = 1;
+
+ RECV = 2;
+ }
+
+ // If available, this is the kernel time:
+ // For sent messages, this is the time at which the first bit was sent.
+ // For received messages, this is the time at which the last bit was
+ // received.
+ google.protobuf.Timestamp kernel_time = 1;
+
+ Type type = 2;
+
+ // Every message has an identifier that must be different from those of all
+ // other network messages in this span. This is especially important when
+ // the request or response is streamed.
+ uint64 message_id = 3;
+
+ // Number of bytes sent or received.
+ uint64 message_size = 4;
+ }
+
+ // The local machine absolute timestamp when this event happened.
+ google.protobuf.Timestamp local_time = 1;
+
+ oneof value {
+ // Optional field for user supplied <string, LabelValue> map
+ Annotation annotation = 2;
+
+ // Optional field that can be used only for network events.
+ NetworkEvent network_event = 3;
+ }
+ }
+
+ // Link one span with another which may be in a different Trace. Used (for
+ // example) in batching operations, where a single batch handler processes
+ // multiple requests from different traces.
+ message Link {
+ // The type of the link.
+ enum Type {
+ UNSPECIFIED = 0;
+
+ CHILD = 1;
+
+ PARENT = 2;
+ }
+
+ // The trace and span identifier of the linked span.
+ TraceId trace_id = 1;
+
+ fixed64 span_id = 2;
+
+ Type type = 3;
+ }
+
+ // Identifier for the span. Must be a 64-bit integer other than 0 and
+ // unique within a trace.
+ fixed64 id = 1;
+
+ // Name of the span. The span name is sanitized and displayed in the
+ // Stackdriver Trace tool in the Google Cloud Platform Console.
+ // The name may be a method name or some other per-call site name.
+ // For the same executable and the same call point, a best practice is
+ // to use a consistent name, which makes it easier to correlate
+ // cross-trace spans.
+ string name = 2;
+
+ // ID of parent span. 0 or missing if this is a root span.
+ fixed64 parent_id = 3;
+
+ // Local machine clock in nanoseconds from the UNIX epoch,
+ // at which span execution started.
+ // On the server side these are the times when the server application
+ // handler starts running.
+ google.protobuf.Timestamp local_start_time = 4;
+
+ // Local machine clock in nanoseconds from the UNIX epoch,
+ // at which span execution ended.
+ // On the server side these are the times when the server application
+ // handler finishes running.
+ google.protobuf.Timestamp local_end_time = 5;
+
+ // Properties of a span. Labels at the span level.
+ // E.g.
+ // "/instance_id": "my-instance"
+ // "/zone": "us-central1-a"
+ // "/grpc/peer_address": "ip:port" (dns, etc.)
+ // "/grpc/deadline": "Duration"
+ // "/http/user_agent"
+ // "/http/request_bytes": 300
+ // "/http/response_bytes": 1200
+ // "/http/url": google.com/apis
+ // "/pid"
+ // "abc.com/mylabel": "my label value"
+ map<string, LabelValue> labels = 6;
+
+ // Stack trace captured at the start of the span. This is optional.
+ StackTrace stack_trace = 7;
+
+ // A collection of time-stamped events.
+ repeated TimeEvent time_events = 8;
+
+ // A collection of links.
+ repeated Link links = 9;
+
+ // The final status of the Span. This is optional.
+ google.rpc.Status status = 10;
+
+ // True if this Span has a remote parent (is an RPC server Span).
+ bool has_remote_parent = 11;
+}
+
+// A trace describes how long it takes for an application to perform some
+// operations. It consists of a tree of spans, each of which contains details
+// about an operation with time information and operation details.
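+//
+// Example (Java): a single-span trace, sketched with the generated builders
+// under `com.google.tracing.v1`; the trace id, span name, timestamps, and
+// label are placeholders.
+//
+//     import com.google.protobuf.Timestamp;
+//     import com.google.tracing.v1.LabelValue;
+//     import com.google.tracing.v1.Span;
+//     import com.google.tracing.v1.Trace;
+//     import com.google.tracing.v1.TraceId;
+//
+//     Trace trace = Trace.newBuilder()
+//         .setTraceId(TraceId.newBuilder()
+//             .setHexEncoded("463ac35c9f6413ad48485a3953bb6124"))
+//         .addSpans(Span.newBuilder()
+//             .setId(1)  // root span: parent_id stays 0
+//             .setName("/checkout/submit")
+//             .setLocalStartTime(Timestamp.newBuilder().setSeconds(1496150400))
+//             .setLocalEndTime(Timestamp.newBuilder().setSeconds(1496150401))
+//             .putLabels("/http/response_bytes",
+//                 LabelValue.newBuilder().setIntValue(1200).build()))
+//         .build();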
+message Trace {
+ // Globally unique identifier for the trace. Common to all the spans.
+ TraceId trace_id = 1;
+
+ // Collection of spans in the trace. The root span has parent_id == 0.
+ repeated Span spans = 2;
+}
diff --git a/third_party/googleapis/google/type/README.md b/third_party/googleapis/google/type/README.md
new file mode 100644
index 0000000000..6caf02cf1f
--- /dev/null
+++ b/third_party/googleapis/google/type/README.md
@@ -0,0 +1,16 @@
+# Google Common Types
+
+This package contains definitions of common types for Google APIs.
+All types defined in this package are suitable for different APIs to
+exchange data, and will never break binary compatibility. They should
+have design quality comparable to major programming languages like
+Java and C#.
+
+NOTE: Some common types are defined in the package `google.protobuf`
+as they are directly supported by Protocol Buffers compiler and
+runtime. Those types are called Well-Known Types.
+
+## Java Utilities
+
+A set of Java utilities for the Common Types are provided in the
+`//java/com/google/type/util/` package.
\ No newline at end of file
diff --git a/third_party/googleapis/google/type/color.proto b/third_party/googleapis/google/type/color.proto
new file mode 100644
index 0000000000..2856ce91ee
--- /dev/null
+++ b/third_party/googleapis/google/type/color.proto
@@ -0,0 +1,164 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.type;
+
+import "google/protobuf/wrappers.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/type/color;color";
+option java_multiple_files = true;
+option java_outer_classname = "ColorProto";
+option java_package = "com.google.type";
+option objc_class_prefix = "GTP";
+
+
+// Represents a color in the RGBA color space. This representation is designed
+// for simplicity of conversion to/from color representations in various
+// languages over compactness; for example, the fields of this representation
+// can be trivially provided to the constructor of "java.awt.Color" in Java; it
+// can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
+// method in iOS; and, with just a little work, it can be easily formatted into
+// a CSS "rgba()" string in JavaScript, as well. Here are some examples:
+//
+// Example (Java):
+//
+// import com.google.type.Color;
+//
+// // ...
+// public static java.awt.Color fromProto(Color protocolor) {
+//       float alpha = protocolor.hasAlpha()
+//           ? protocolor.getAlpha().getValue()
+//           : 1.0f;
+//
+// return new java.awt.Color(
+// protocolor.getRed(),
+// protocolor.getGreen(),
+// protocolor.getBlue(),
+// alpha);
+// }
+//
+// public static Color toProto(java.awt.Color color) {
+// float red = (float) color.getRed();
+// float green = (float) color.getGreen();
+// float blue = (float) color.getBlue();
+//       float denominator = 255.0f;
+// Color.Builder resultBuilder =
+// Color
+// .newBuilder()
+// .setRed(red / denominator)
+// .setGreen(green / denominator)
+// .setBlue(blue / denominator);
+// int alpha = color.getAlpha();
+// if (alpha != 255) {
+//         resultBuilder.setAlpha(
+// FloatValue
+// .newBuilder()
+// .setValue(((float) alpha) / denominator)
+// .build());
+// }
+// return resultBuilder.build();
+// }
+// // ...
+//
+// Example (iOS / Obj-C):
+//
+// // ...
+// static UIColor* fromProto(Color* protocolor) {
+// float red = [protocolor red];
+// float green = [protocolor green];
+// float blue = [protocolor blue];
+// FloatValue* alpha_wrapper = [protocolor alpha];
+// float alpha = 1.0;
+// if (alpha_wrapper != nil) {
+// alpha = [alpha_wrapper value];
+// }
+// return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
+// }
+//
+// static Color* toProto(UIColor* color) {
+// CGFloat red, green, blue, alpha;
+// if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
+// return nil;
+// }
+//       Color* result = [[Color alloc] init];
+// [result setRed:red];
+// [result setGreen:green];
+// [result setBlue:blue];
+// if (alpha <= 0.9999) {
+// [result setAlpha:floatWrapperWithValue(alpha)];
+// }
+// [result autorelease];
+// return result;
+// }
+// // ...
+//
+// Example (JavaScript):
+//
+// // ...
+//
+// var protoToCssColor = function(rgb_color) {
+// var redFrac = rgb_color.red || 0.0;
+// var greenFrac = rgb_color.green || 0.0;
+// var blueFrac = rgb_color.blue || 0.0;
+// var red = Math.floor(redFrac * 255);
+// var green = Math.floor(greenFrac * 255);
+// var blue = Math.floor(blueFrac * 255);
+//
+// if (!('alpha' in rgb_color)) {
+// return rgbToCssColor_(red, green, blue);
+// }
+//
+// var alphaFrac = rgb_color.alpha.value || 0.0;
+// var rgbParams = [red, green, blue].join(',');
+// return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
+// };
+//
+// var rgbToCssColor_ = function(red, green, blue) {
+// var rgbNumber = new Number((red << 16) | (green << 8) | blue);
+// var hexString = rgbNumber.toString(16);
+// var missingZeros = 6 - hexString.length;
+// var resultBuilder = ['#'];
+// for (var i = 0; i < missingZeros; i++) {
+// resultBuilder.push('0');
+// }
+// resultBuilder.push(hexString);
+// return resultBuilder.join('');
+// };
+//
+// // ...
+message Color {
+ // The amount of red in the color as a value in the interval [0, 1].
+ float red = 1;
+
+ // The amount of green in the color as a value in the interval [0, 1].
+ float green = 2;
+
+ // The amount of blue in the color as a value in the interval [0, 1].
+ float blue = 3;
+
+ // The fraction of this color that should be applied to the pixel. That is,
+ // the final pixel color is defined by the equation:
+ //
+ // pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
+ //
+ // This means that a value of 1.0 corresponds to a solid color, whereas
+ // a value of 0.0 corresponds to a completely transparent color. This
+ // uses a wrapper message rather than a simple float scalar so that it is
+ // possible to distinguish between a default value and the value being unset.
+ // If omitted, this color object is to be rendered as a solid color
+ // (as if the alpha value had been explicitly given with a value of 1.0).
+ google.protobuf.FloatValue alpha = 4;
+}
diff --git a/third_party/googleapis/google/type/date.proto b/third_party/googleapis/google/type/date.proto
new file mode 100644
index 0000000000..d873ed462f
--- /dev/null
+++ b/third_party/googleapis/google/type/date.proto
@@ -0,0 +1,45 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.type;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/type/date;date";
+option java_multiple_files = true;
+option java_outer_classname = "DateProto";
+option java_package = "com.google.type";
+option objc_class_prefix = "GTP";
+
+
+// Represents a whole calendar date, e.g. date of birth. The time of day and
+// time zone are either specified elsewhere or are not significant. The date
+// is relative to the Proleptic Gregorian Calendar. The day may be 0 to
+// represent a year and month where the day is not significant, e.g. credit card
+// expiration date. The year may be 0 to represent a month and day independent
+// of year, e.g. anniversary date. Related types are [google.type.TimeOfDay][google.type.TimeOfDay]
+// and `google.protobuf.Timestamp`.
+message Date {
+ // Year of date. Must be from 1 to 9999, or 0 if specifying a date without
+ // a year.
+ int32 year = 1;
+
+ // Month of year. Must be from 1 to 12.
+ int32 month = 2;
+
+ // Day of month. Must be from 1 to 31 and valid for the year and month, or 0
+ // if specifying a year/month where the day is not significant.
+ int32 day = 3;
+}
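
To make the year/month/day conventions above concrete, here is a minimal Java sketch, assuming the generated com.google.type.Date class produced by the proto_library rules for this file; the DateUtil class name and the java.time conversion are illustrative, not part of the API:

    import com.google.type.Date;
    import java.time.LocalDate;

    public final class DateUtil {
      // Full calendar date, e.g. a date of birth.
      public static Date fromLocalDate(LocalDate d) {
        return Date.newBuilder()
            .setYear(d.getYear())        // 1-9999
            .setMonth(d.getMonthValue()) // 1-12
            .setDay(d.getDayOfMonth())   // 1-31
            .build();
      }

      // Year/month only, e.g. a credit card expiration; day is left at its
      // default of 0 to indicate that the day is not significant.
      public static Date expiration(int year, int month) {
        return Date.newBuilder().setYear(year).setMonth(month).build();
      }
    }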
diff --git a/third_party/googleapis/google/type/dayofweek.proto b/third_party/googleapis/google/type/dayofweek.proto
new file mode 100644
index 0000000000..4eaa9e729f
--- /dev/null
+++ b/third_party/googleapis/google/type/dayofweek.proto
@@ -0,0 +1,51 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.type;
+
+option go_package = "google.golang.org/genproto/googleapis/type/dayofweek;dayofweek";
+option java_multiple_files = true;
+option java_outer_classname = "DayOfWeekProto";
+option java_package = "com.google.type";
+option objc_class_prefix = "GTP";
+
+
+// Represents a day of week.
+enum DayOfWeek {
+ // The unspecified day-of-week.
+ DAY_OF_WEEK_UNSPECIFIED = 0;
+
+ // The day-of-week of Monday.
+ MONDAY = 1;
+
+ // The day-of-week of Tuesday.
+ TUESDAY = 2;
+
+ // The day-of-week of Wednesday.
+ WEDNESDAY = 3;
+
+ // The day-of-week of Thursday.
+ THURSDAY = 4;
+
+ // The day-of-week of Friday.
+ FRIDAY = 5;
+
+ // The day-of-week of Saturday.
+ SATURDAY = 6;
+
+ // The day-of-week of Sunday.
+ SUNDAY = 7;
+}
diff --git a/third_party/googleapis/google/type/latlng.proto b/third_party/googleapis/google/type/latlng.proto
new file mode 100644
index 0000000000..4e8c65d227
--- /dev/null
+++ b/third_party/googleapis/google/type/latlng.proto
@@ -0,0 +1,71 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.type;
+
+option go_package = "google.golang.org/genproto/googleapis/type/latlng;latlng";
+option java_multiple_files = true;
+option java_outer_classname = "LatLngProto";
+option java_package = "com.google.type";
+option objc_class_prefix = "GTP";
+
+
+// An object representing a latitude/longitude pair. This is expressed as a pair
+// of doubles representing degrees latitude and degrees longitude. Unless
+// specified otherwise, this must conform to the
+// <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+// standard</a>. Values must be within normalized ranges.
+//
+// Example of normalization code in Python:
+//
+// def NormalizeLongitude(longitude):
+// """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+// q, r = divmod(longitude, 360.0)
+// if r > 180.0 or (r == 180.0 and q <= -1.0):
+// return r - 360.0
+// return r
+//
+// def NormalizeLatLng(latitude, longitude):
+// """Wraps decimal degrees latitude and longitude to
+// [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+// r = latitude % 360.0
+// if r <= 90.0:
+// return r, NormalizeLongitude(longitude)
+// elif r >= 270.0:
+// return r - 360, NormalizeLongitude(longitude)
+// else:
+// return 180 - r, NormalizeLongitude(longitude + 180.0)
+//
+// assert 180.0 == NormalizeLongitude(180.0)
+// assert -180.0 == NormalizeLongitude(-180.0)
+// assert -179.0 == NormalizeLongitude(181.0)
+// assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+// assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+// assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+// assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+// assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+// assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+// assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+// assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+// assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+// assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+message LatLng {
+ // The latitude in degrees. It must be in the range [-90.0, +90.0].
+ double latitude = 1;
+
+ // The longitude in degrees. It must be in the range [-180.0, +180.0].
+ double longitude = 2;
+}
diff --git a/third_party/googleapis/google/type/money.proto b/third_party/googleapis/google/type/money.proto
new file mode 100644
index 0000000000..154acc9f29
--- /dev/null
+++ b/third_party/googleapis/google/type/money.proto
@@ -0,0 +1,42 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.type;
+
+option go_package = "google.golang.org/genproto/googleapis/type/money;money";
+option java_multiple_files = true;
+option java_outer_classname = "MoneyProto";
+option java_package = "com.google.type";
+option objc_class_prefix = "GTP";
+
+
+// Represents an amount of money with its currency type.
+message Money {
+ // The 3-letter currency code defined in ISO 4217.
+ string currency_code = 1;
+
+ // The whole units of the amount.
+  // For example, if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+ int64 units = 2;
+
+ // Number of nano (10^-9) units of the amount.
+ // The value must be between -999,999,999 and +999,999,999 inclusive.
+ // If `units` is positive, `nanos` must be positive or zero.
+ // If `units` is zero, `nanos` can be positive, zero, or negative.
+ // If `units` is negative, `nanos` must be negative or zero.
+ // For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+ int32 nanos = 3;
+}
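
As a sketch of the units/nanos split documented above, assuming the generated com.google.type.Money class; the MoneyUtil helper and the use of java.math.BigDecimal are illustrative only:

    import com.google.type.Money;
    import java.math.BigDecimal;

    public final class MoneyUtil {
      // Splits a decimal amount into whole units and signed nanos. For example,
      // -1.75 USD becomes units=-1 and nanos=-750,000,000, matching the sign
      // rules in the field comments above.
      public static Money toMoney(String currencyCode, BigDecimal amount) {
        long units = amount.longValue();  // truncates toward zero
        int nanos = amount.subtract(BigDecimal.valueOf(units))
            .movePointRight(9)
            .intValueExact();             // fails if more than 9 decimal places
        return Money.newBuilder()
            .setCurrencyCode(currencyCode)
            .setUnits(units)
            .setNanos(nanos)
            .build();
      }
    }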
diff --git a/third_party/googleapis/google/type/postal_address.proto b/third_party/googleapis/google/type/postal_address.proto
new file mode 100644
index 0000000000..b08b61726a
--- /dev/null
+++ b/third_party/googleapis/google/type/postal_address.proto
@@ -0,0 +1,132 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.type;
+
+option go_package = "google.golang.org/genproto/googleapis/type/postaladdress;postaladdress";
+option java_multiple_files = true;
+option java_outer_classname = "PostalAddressProto";
+option java_package = "com.google.type";
+option objc_class_prefix = "GTP";
+
+
+// Represents a postal address, e.g. for postal delivery or payment addresses.
+// Given a postal address, a postal service can deliver items to a premise, P.O.
+// Box or similar.
+// It is not intended to model geographical locations (roads, towns,
+// mountains).
+//
+// In typical usage an address would be created via user input or from importing
+// existing data, depending on the type of process.
+//
+// Advice on address input / editing:
+// - Use an i18n-ready address widget such as
+//    https://github.com/googlei18n/libaddressinput
+// - Users should not be presented with UI elements for input or editing of
+// fields outside countries where that field is used.
+//
+// For more guidance on how to use this schema, please see:
+// https://support.google.com/business/answer/6397478
+message PostalAddress {
+ // The schema revision of the `PostalAddress`.
+ // All new revisions **must** be backward compatible with old revisions.
+ int32 revision = 1;
+
+ // Required. CLDR region code of the country/region of the address. This
+ // is never inferred and it is up to the user to ensure the value is
+ // correct. See http://cldr.unicode.org/ and
+ // http://www.unicode.org/cldr/charts/30/supplemental/territory_information.html
+ // for details. Example: "CH" for Switzerland.
+ string region_code = 2;
+
+ // Optional. BCP-47 language code of the contents of this address (if
+ // known). This is often the UI language of the input form or is expected
+ // to match one of the languages used in the address' country/region, or their
+ // transliterated equivalents.
+ // This can affect formatting in certain countries, but is not critical
+ // to the correctness of the data and will never affect any validation or
+ // other non-formatting related operations.
+ //
+ // If this value is not known, it should be omitted (rather than specifying a
+ // possibly incorrect default).
+ //
+ // Examples: "zh-Hant", "ja", "ja-Latn", "en".
+ string language_code = 3;
+
+ // Optional. Postal code of the address. Not all countries use or require
+ // postal codes to be present, but where they are used, they may trigger
+ // additional validation with other parts of the address (e.g. state/zip
+ // validation in the U.S.A.).
+ string postal_code = 4;
+
+ // Optional. Additional, country-specific, sorting code. This is not used
+ // in most regions. Where it is used, the value is either a string like
+ // "CEDEX", optionally followed by a number (e.g. "CEDEX 7"), or just a number
+ // alone, representing the "sector code" (Jamaica), "delivery area indicator"
+ // (Malawi) or "post office indicator" (e.g. Côte d'Ivoire).
+ string sorting_code = 5;
+
+ // Optional. Highest administrative subdivision which is used for postal
+ // addresses of a country or region.
+ // For example, this can be a state, a province, an oblast, or a prefecture.
+ // Specifically, for Spain this is the province and not the autonomous
+ // community (e.g. "Barcelona" and not "Catalonia").
+ // Many countries don't use an administrative area in postal addresses. E.g.
+ // in Switzerland this should be left unpopulated.
+ string administrative_area = 6;
+
+ // Optional. Generally refers to the city/town portion of the address.
+ // Examples: US city, IT comune, UK post town.
+ // In regions of the world where localities are not well defined or do not fit
+ // into this structure well, leave locality empty and use address_lines.
+ string locality = 7;
+
+ // Optional. Sublocality of the address.
+ // For example, this can be neighborhoods, boroughs, districts.
+ string sublocality = 8;
+
+ // Unstructured address lines describing the lower levels of an address.
+ //
+ // Because values in address_lines do not have type information and may
+ // sometimes contain multiple values in a single field (e.g.
+ // "Austin, TX"), it is important that the line order is clear. The order of
+ // address lines should be "envelope order" for the country/region of the
+ // address. In places where this can vary (e.g. Japan), address_language is
+ // used to make it explicit (e.g. "ja" for large-to-small ordering and
+ // "ja-Latn" or "en" for small-to-large). This way, the most specific line of
+ // an address can be selected based on the language.
+ //
+ // The minimum permitted structural representation of an address consists
+ // of a region_code with all remaining information placed in the
+ // address_lines. It would be possible to format such an address very
+ // approximately without geocoding, but no semantic reasoning could be
+ // made about any of the address components until it was at least
+ // partially resolved.
+ //
+ // Creating an address only containing a region_code and address_lines, and
+ // then geocoding is the recommended way to handle completely unstructured
+ // addresses (as opposed to guessing which parts of the address should be
+ // localities or administrative areas).
+ repeated string address_lines = 9;
+
+ // Optional. The recipient at the address.
+ // This field may, under certain circumstances, contain multiline information.
+ // For example, it might contain "care of" information.
+ repeated string recipients = 10;
+
+ // Optional. The name of the organization at the address.
+ string organization = 11;
+}
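
A brief sketch of the recommended handling of completely unstructured input (a region_code plus raw address_lines, followed by geocoding), assuming the generated com.google.type.PostalAddress class; the street and city values are made up for illustration:

    import com.google.type.PostalAddress;

    public final class AddressExample {
      public static PostalAddress unstructured() {
        return PostalAddress.newBuilder()
            .setRegionCode("CH")                  // required CLDR region code
            .addAddressLines("Beispielstrasse 7") // raw, untyped address line
            .addAddressLines("8000 Zürich")       // raw, untyped address line
            .build();
      }
    }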
diff --git a/third_party/googleapis/google/type/timeofday.proto b/third_party/googleapis/google/type/timeofday.proto
new file mode 100644
index 0000000000..b130256e22
--- /dev/null
+++ b/third_party/googleapis/google/type/timeofday.proto
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.type;
+
+option go_package = "google.golang.org/genproto/googleapis/type/timeofday;timeofday";
+option java_multiple_files = true;
+option java_outer_classname = "TimeOfDayProto";
+option java_package = "com.google.type";
+option objc_class_prefix = "GTP";
+
+
+// Represents a time of day. The date and time zone are either not significant
+// or are specified elsewhere. An API may choose to allow leap seconds. Related
+// types are [google.type.Date][google.type.Date] and `google.protobuf.Timestamp`.
+message TimeOfDay {
+ // Hours of day in 24 hour format. Should be from 0 to 23. An API may choose
+ // to allow the value "24:00:00" for scenarios like business closing time.
+ int32 hours = 1;
+
+ // Minutes of hour of day. Must be from 0 to 59.
+ int32 minutes = 2;
+
+  // Seconds of a minute of the time. Must normally be from 0 to 59. An API may
+ // allow the value 60 if it allows leap-seconds.
+ int32 seconds = 3;
+
+ // Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.
+ int32 nanos = 4;
+}
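
A minimal Java sketch converting a java.time.LocalTime into the message above, assuming the generated com.google.type.TimeOfDay class; the helper name is illustrative:

    import com.google.type.TimeOfDay;
    import java.time.LocalTime;

    public final class TimeOfDayUtil {
      public static TimeOfDay fromLocalTime(LocalTime t) {
        return TimeOfDay.newBuilder()
            .setHours(t.getHour())     // 0-23
            .setMinutes(t.getMinute()) // 0-59
            .setSeconds(t.getSecond()) // 0-59 (LocalTime has no leap seconds)
            .setNanos(t.getNano())     // 0-999,999,999
            .build();
      }
    }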
diff --git a/third_party/googleapis/google/watcher/v1/watch.proto b/third_party/googleapis/google/watcher/v1/watch.proto
new file mode 100644
index 0000000000..5d2c4ab24f
--- /dev/null
+++ b/third_party/googleapis/google/watcher/v1/watch.proto
@@ -0,0 +1,283 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.watcher.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/watcher/v1;watcher";
+option java_multiple_files = true;
+option java_outer_classname = "WatchProto";
+
+// ## API Overview
+//
+// [Watcher][] lets a client watch for updates to a named entity, such as a
+// directory or database table. For each watched entity, the client receives a
+// reliable stream of watch events, without re-ordering.
+//
+// Watching is done by sending an RPC to a service that implements the API. The
+// argument to the RPC contains the name of the entity. The result stream
+// consists of a sequence of Change messages that the service continues to
+// send until the call fails or is cancelled.
+//
+// ## Data model
+//
+// This API assumes that each *entity* has a name and a
+// set of *elements*, where each element has a name and a value. The
+// entity's name must be a unique identifier within the service, such as
+// a resource name. What constitutes an entity or element is
+// implementation-specific: for example, a file system implementation
+// might define an entity as either a directory or a file, and elements would be
+// child files or directories of that entity.
+//
+// The Watch API allows a client to watch an entity E's immediate
+// elements or the whole tree rooted at E. Elements are organized into
+// a hierarchy ("" at the top; the rest follows the natural hierarchy of the
+// namespace of elements that is being watched). For example, when
+// recursively watching a filesystem namespace, X is an ancestor of
+// X/Y and X/Y/Z.
+//
+// ## Watch request
+//
+// When a client makes a request to watch an entity, it can indicate
+// whether it wants to receive the initial state of the entity, just
+// new changes to the entity, or resume watching from a particular
+// point in a previous watch stream, specified with a `resume_marker` value.
+// It can also indicate whether it wants to watch only one entity or all
+// entities in the subtree rooted at a particular entity's name.
+//
+// On receiving a watch request for an entity, the server sends one or more
+// messages to the client. The first message informs the client that the server
+// has registered the client's request: the instant of time when the
+// client receives the event is referred to as the client's "watch
+// point" for that entity.
+//
+// ## Atomic delivery
+//
+// The response stream consists of a sequence of Change messages. Each
+// message contains a `continued` bit. A sub-sequence of Change messages with
+// `continued=true` followed by a Change message with `continued=false` forms an
+// *atomic group*. Systems that support multi-element atomic updates may
+// guarantee that all changes resulting from a single atomic
+// update are delivered in the same atomic group. It is up to the
+// documentation of a particular system that implements the Watch API to
+// document whether or not it supports such grouping. We expect that most
+// callers will ignore the notion of atomic delivery and the `continued` bit,
+// i.e., they will just process each Change message as it is received.
+//
+// ## Batching
+//
+// Multiple Change messages may be grouped into a single ChangeBatch message
+// to reduce message transfer overhead. A single ChangeBatch may contain many
+// atomic groups, or a single atomic group may be split across many
+// ChangeBatch messages.
+//
+// ## Initial State
+//
+// The first atomic group delivered by a watch call is special. It is
+// delivered as soon as possible and contains the initial state of the
+// entity being watched. The client should consider itself caught up
+// after processing this first atomic group.
+//
+// The messages in the first atomic group will either refer to the
+// entity itself (`Change.element` == "") or to elements inside the
+// entity (`Change.element` != ""). Here are the cases to consider:
+//
+// 1. `resume_marker` is "" or not specified: For every element P
+// (including the entity itself) that exists, there will be at least
+// one message delivered with element == P and the last such message
+// will contain the current state of P. For every element Q
+// (including the entity itself) that does not exist, either no
+// message will be delivered, or the last message for Q will have
+// state == DOES_NOT_EXIST. At least one message for element="" will
+// be delivered.
+//
+// 2. `resume_marker` == "now": there will be exactly one message with
+// element = "" and state INITIAL_STATE_SKIPPED. The client cannot
+// assume whether or not the entity exists after receiving this
+// message.
+//
+// 3. `resume_marker` has a value R from a preceding watch call on this
+// entity: The same messages as described in (1) will be delivered to
+// the client, except that any information implied by messages received
+// on the preceding call up to and including R may not be
+// delivered. The expectation is that the client will start with state
+// it had built up from the preceding watch call, apply the changes
+// received from this call, and build an up-to-date view of the entity
+// without having to fetch a potentially large amount of information
+// that has not changed. Note that some information that had already
+// been delivered by the preceding call might be delivered again.
+//
+// ## Ordering and Reliability
+//
+// The Change messages that apply to a particular element of the entity are
+// delivered eventually in order without loss for the duration of the RPC. Note
+// however that if multiple Changes apply to the same element, the
+// implementation is free to suppress them and deliver just the last one. The
+// underlying system must provide the guarantee that any relevant update
+// received for an entity E after a client's watch point for E MUST be delivered
+// to that client.
+//
+// These tight guarantees allow for the following simplifications in the client:
+//
+// 1. The client does not need to have a separate polling loop to make up for
+// missed updates.
+//
+// 2. The client does not need to manage timestamps/versions manually; the
+// last update delivered corresponds to the eventual state of the entity.
+//
+// Example: a calendar entry may have elements named { "starttime", "endtime",
+// "attendees" } with corresponding values or it may have a single element name
+// "entry" with a serialized proto for the calendar entry.
+//
+// ## Ordering constraints for parents/descendants
+//
+// The Watch API provides guarantees regarding the order in which
+// messages for a parent and its descendants are delivered:
+//
+// 1. The creation of an ancestor (i.e., the first EXISTS message for
+// the ancestor) is reported before the creation of any of its
+// descendants.
+//
+// 2. The deletion of an ancestor (via a DOES_NOT_EXIST message)
+// implies the deletion of all its descendants. The service will
+// not deliver any messages for the descendants until the parent
+// has been recreated.
+
+
+// The service that a client uses to connect to the watcher system.
+// The errors returned by the service are in the canonical error space,
+// see [google.rpc.Code][].
+service Watcher {
+ // Start a streaming RPC to get watch information from the server.
+ rpc Watch(Request) returns (stream ChangeBatch) {
+ option (google.api.http) = { get: "/v1/watch" };
+ }
+}
+
+// The message used by the client to register interest in an entity.
+message Request {
+ // The `target` value **must** be a valid URL path pointing to an entity
+ // to watch. Note that the service name **must** be
+ // removed from the target field (e.g., the target field must say
+ // "/foo/bar", not "myservice.googleapis.com/foo/bar"). A client is
+ // also allowed to pass system-specific parameters in the URL that
+ // are only obeyed by some implementations. Some parameters will be
+ // implementation-specific. However, some have predefined meaning
+ // and are listed here:
+ //
+ // * recursive = true|false [default=false]
+ // If set to true, indicates that the client wants to watch all elements
+ // of entities in the subtree rooted at the entity's name in `target`. For
+ // descendants that are not the immediate children of the target, the
+ // `Change.element` will contain slashes.
+ //
+ // Note that some namespaces and entities will not support recursive
+ // watching. When watching such an entity, a client must not set recursive
+ // to true. Otherwise, it will receive an `UNIMPLEMENTED` error.
+ //
+ // Normal URL encoding must be used inside `target`. For example, if a query
+ // parameter name or value, or the non-query parameter portion of `target`
+ // contains a special character, it must be %-encoded. We recommend that
+ // clients and servers use their runtime's URL library to produce and consume
+ // target values.
+ string target = 1;
+
+ // The `resume_marker` specifies how much of the existing underlying state is
+ // delivered to the client when the watch request is received by the
+ // system. The client can set this marker in one of the following ways to get
+ // different semantics:
+ //
+ // * Parameter is not specified or has the value "".
+ // Semantics: Fetch initial state.
+ // The client wants the entity's initial state to be delivered. See the
+ // description in "Initial State".
+ //
+ // * Parameter is set to the string "now" (UTF-8 encoding).
+ // Semantics: Fetch new changes only.
+ // The client just wants to get the changes received by the system after
+ // the watch point. The system may deliver changes from before the watch
+ // point as well.
+ //
+ // * Parameter is set to a value received in an earlier
+ // `Change.resume_marker` field while watching the same entity.
+ // Semantics: Resume from a specific point.
+ // The client wants to receive the changes from a specific point; this
+ // value must correspond to a value received in the `Change.resume_marker`
+ // field. The system may deliver changes from before the `resume_marker`
+ // as well. If the system cannot resume the stream from this point (e.g.,
+ // if it is too far behind in the stream), it can raise the
+ // `FAILED_PRECONDITION` error.
+ //
+ // An implementation MUST support an unspecified parameter and the
+ // empty string "" marker (initial state fetching) and the "now" marker.
+ // It need not support resuming from a specific point.
+ bytes resume_marker = 2;
+}
+
+// A batch of Change messages.
+message ChangeBatch {
+ // A list of Change messages.
+ repeated Change changes = 1;
+}
+
+// A Change indicates the most recent state of an element.
+message Change {
+ // A reported value can be in one of the following states:
+ enum State {
+ // The element exists and its full value is included in data.
+ EXISTS = 0;
+
+ // The element does not exist.
+ DOES_NOT_EXIST = 1;
+
+ // Element may or may not exist. Used only for initial state delivery when
+ // the client is not interested in fetching the initial state. See the
+ // "Initial State" section above.
+ INITIAL_STATE_SKIPPED = 2;
+
+ // The element may exist, but some error has occurred. More information is
+ // available in the data field - the value is a serialized Status
+ // proto (from [google.rpc.Status][])
+ ERROR = 3;
+ }
+
+ // Name of the element, interpreted relative to the entity's actual
+ // name. "" refers to the entity itself. The element name is a valid
+ // UTF-8 string.
+ string element = 1;
+
+ // The state of the `element`.
+ State state = 2;
+
+ // The actual change data. This field is present only when `state() == EXISTS`
+  // or `state() == ERROR`. See [google.protobuf.Any][google.protobuf.Any] for how to use
+ // the Any type.
+ google.protobuf.Any data = 6;
+
+ // If present, provides a compact representation of all the messages that have
+ // been received by the caller for the given entity, e.g., it could be a
+ // sequence number or a multi-part timestamp/version vector. This marker can
+  // be provided in the Request message, allowing the caller to resume watching
+  // the stream at a specific point without fetching the initial state.
+ bytes resume_marker = 4;
+
+ // If true, this Change is followed by more Changes that are in the same group
+ // as this Change.
+ bool continued = 5;
+}
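
A sketch of a client that consumes the Watch stream and regroups Change messages into atomic groups using the `continued` bit, assuming grpc-java stubs are generated for google.watcher.v1 (this change only adds the proto; the WatcherGrpc stub, the endpoint, and the target below are hypothetical):

    import com.google.protobuf.ByteString;
    import google.watcher.v1.Change;
    import google.watcher.v1.ChangeBatch;
    import google.watcher.v1.Request;
    import google.watcher.v1.WatcherGrpc;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public final class WatchClient {
      public static void main(String[] args) {
        ManagedChannel channel = ManagedChannelBuilder
            .forTarget("example.googleapis.com:443") // hypothetical endpoint
            .useTransportSecurity()
            .build();
        WatcherGrpc.WatcherBlockingStub stub = WatcherGrpc.newBlockingStub(channel);

        // Watch the subtree rooted at /foo/bar; "now" skips the initial state.
        Request request = Request.newBuilder()
            .setTarget("/foo/bar?recursive=true")
            .setResumeMarker(ByteString.copyFromUtf8("now"))
            .build();

        // The blocking stub exposes the server-streaming Watch RPC as an iterator.
        Iterator<ChangeBatch> batches = stub.watch(request);
        List<Change> atomicGroup = new ArrayList<>();
        while (batches.hasNext()) {
          for (Change change : batches.next().getChangesList()) {
            atomicGroup.add(change);
            if (!change.getContinued()) {
              // continued=false closes the current atomic group; apply it as a unit.
              for (Change c : atomicGroup) {
                System.out.println(c.getElement() + " -> " + c.getState());
              }
              atomicGroup.clear();
            }
          }
        }
      }
    }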