aboutsummaryrefslogtreecommitdiffhomepage
path: root/third_party/googleapis/google/bigtable
diff options
context:
space:
mode:
authorGravatar Jakob Buchgraber <buchgr@google.com>2017-05-30 15:49:37 +0200
committerGravatar Jakob Buchgraber <buchgr@google.com>2017-05-30 15:59:48 +0200
commit84a8e95910f069dd03a19b0fc634f95bb0beac95 (patch)
treed3a5cd8e259799e36834b247194c2d1aa3163e16 /third_party/googleapis/google/bigtable
parentc7696b47a4a12b1e56e41246770cbd44ad1c9c3e (diff)
Introduce third_party/googleapis
Add `https://github.com/googleapis/googleapis` as a third_party dependency at commit `001f6702ac4cd72194a5120ff978fcfa740783d6`. These protos are required for the upcoming open sourcing of the BES protocol code. Additionally, add (java_)proto_library() rules for the protobufs required by the BES protocol. Change-Id: Ie78a9941a62f2085a58ad859c91161885e6f390d
Diffstat (limited to 'third_party/googleapis/google/bigtable')
-rw-r--r--third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml76
-rw-r--r--third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto126
-rw-r--r--third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto80
-rw-r--r--third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto116
-rw-r--r--third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto233
-rw-r--r--third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto214
-rw-r--r--third_party/googleapis/google/bigtable/admin/v2/common.proto38
-rw-r--r--third_party/googleapis/google/bigtable/admin/v2/instance.proto130
-rw-r--r--third_party/googleapis/google/bigtable/admin/v2/table.proto118
-rw-r--r--third_party/googleapis/google/bigtable/bigtable.yaml33
-rw-r--r--third_party/googleapis/google/bigtable/v1/bigtable_data.proto516
-rw-r--r--third_party/googleapis/google/bigtable/v1/bigtable_service.proto74
-rw-r--r--third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto218
-rw-r--r--third_party/googleapis/google/bigtable/v2/bigtable.proto322
-rw-r--r--third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml137
-rw-r--r--third_party/googleapis/google/bigtable/v2/data.proto533
16 files changed, 2964 insertions, 0 deletions
diff --git a/third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml b/third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml
new file mode 100644
index 0000000000..cbf0447122
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/bigtableadmin.yaml
@@ -0,0 +1,76 @@
+# Google Bigtable Admin API service configuration
+
+type: google.api.Service
+config_version: 3
+name: bigtableadmin.googleapis.com
+title: Cloud Bigtable Admin API
+
+apis:
+- name: google.bigtable.admin.v2.BigtableInstanceAdmin
+- name: google.bigtable.admin.v2.BigtableTableAdmin
+- name: google.longrunning.Operations
+
+# Additional types which are used as google.protobuf.Any values
+types:
+- name: google.bigtable.admin.v2.CreateInstanceMetadata
+- name: google.bigtable.admin.v2.UpdateClusterMetadata
+
+authentication:
+ rules:
+ # Unless explicitly weakened, all BigtableInstanceAdmin ops require cluster
+ # admin access.
+ - selector: google.bigtable.admin.v2.BigtableInstanceAdmin.*,
+ google.longrunning.Operations.*
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.cluster,
+ https://www.googleapis.com/auth/bigtable.admin.instance,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,
+ https://www.googleapis.com/auth/cloud-platform
+ # BigtableInstanceAdmin Ops which only require read access
+ - selector: google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster,
+ google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance,
+ google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters,
+ google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances,
+ google.longrunning.Operations.GetOperation,
+ google.longrunning.Operations.ListOperations
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.cluster,
+ https://www.googleapis.com/auth/bigtable.admin.instance,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-platform.read-only
+
+ # Unless explicitly weakened, all BigtableTableAdmin ops require table admin access
+ - selector: google.bigtable.admin.v2.BigtableTableAdmin.*
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-platform
+ # BigtableTableAdmin Ops which only require read access
+ - selector: google.bigtable.admin.v2.BigtableTableAdmin.GetTable,
+ google.bigtable.admin.v2.BigtableTableAdmin.ListTables
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.admin,
+ https://www.googleapis.com/auth/bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-bigtable.admin,
+ https://www.googleapis.com/auth/cloud-bigtable.admin.table,
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-platform.read-only
+
+# Http override to expose Operations API at v2
+http:
+ rules:
+ - selector: google.longrunning.Operations.GetOperation
+ get: '/v2/{name=operations/**}'
+ - selector: google.longrunning.Operations.ListOperations
+ get: '/v2/{name=operations}'
+ - selector: google.longrunning.Operations.CancelOperation
+ post: '/v2/{name=operations/**}:cancel'
+ - selector: google.longrunning.Operations.DeleteOperation
+ delete: '/v2/{name=operations/**}'
diff --git a/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto
new file mode 100644
index 0000000000..40072416cd
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_data.proto
@@ -0,0 +1,126 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.table.v1;
+
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableDataProto";
+option java_package = "com.google.bigtable.admin.table.v1";
+
+
+// A collection of user data indexed by row, column, and timestamp.
+// Each table is served using the resources of its parent cluster.
+message Table {
+ enum TimestampGranularity {
+ MILLIS = 0;
+ }
+
+ // A unique identifier of the form
+ // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
+ string name = 1;
+
+ // If this Table is in the process of being created, the Operation used to
+ // track its progress. As long as this operation is present, the Table will
+ // not accept any Table Admin or Read/Write requests.
+ google.longrunning.Operation current_operation = 2;
+
+ // The column families configured for this table, mapped by column family id.
+ map<string, ColumnFamily> column_families = 3;
+
+ // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
+ // this table. Timestamps not matching the granularity will be rejected.
+ // Cannot be changed once the table is created.
+ TimestampGranularity granularity = 4;
+}
+
+// A set of columns within a table which share a common configuration.
+message ColumnFamily {
+ // A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+
+ // The last segment is the same as the "name" field in
+ // google.bigtable.v1.Family.
+ string name = 1;
+
+ // Garbage collection expression specified by the following grammar:
+ // GC = EXPR
+ // | "" ;
+ // EXPR = EXPR, "||", EXPR (* lowest precedence *)
+ // | EXPR, "&&", EXPR
+ // | "(", EXPR, ")" (* highest precedence *)
+ // | PROP ;
+ // PROP = "version() >", NUM32
+ // | "age() >", NUM64, [ UNIT ] ;
+ // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *)
+ // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *)
+ // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *)
+ // GC expressions can be up to 500 characters in length
+ //
+ // The different types of PROP are defined as follows:
+ // version() - cell index, counting from most recent and starting at 1
+ // age() - age of the cell (current time minus cell timestamp)
+ //
+ // Example: "version() > 3 || (age() > 3d && version() > 1)"
+ // drop cells beyond the most recent three, and drop cells older than three
+ // days unless they're the most recent cell in the row/column
+ //
+ // Garbage collection executes opportunistically in the background, and so
+ // it's possible for reads to return a cell even if it matches the active GC
+ // expression for its family.
+ string gc_expression = 2;
+
+ // Garbage collection rule specified as a protobuf.
+ // Supersedes `gc_expression`.
+ // Must serialize to at most 500 bytes.
+ //
+ // NOTE: Garbage collection executes opportunistically in the background, and
+ // so it's possible for reads to return a cell even if it matches the active
+ // GC expression for its family.
+ GcRule gc_rule = 3;
+}
+
+// Rule for determining which cells to delete during garbage collection.
+message GcRule {
+ // A GcRule which deletes cells matching all of the given rules.
+ message Intersection {
+ // Only delete cells which would be deleted by every element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ // A GcRule which deletes cells matching any of the given rules.
+ message Union {
+ // Delete cells which would be deleted by any element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ oneof rule {
+ // Delete all cells in a column except the most recent N.
+ int32 max_num_versions = 1;
+
+ // Delete cells in a column older than the given age.
+ // Values must be at least one millisecond, and will be truncated to
+ // microsecond granularity.
+ google.protobuf.Duration max_age = 2;
+
+ // Delete cells that would be deleted by every nested rule.
+ Intersection intersection = 3;
+
+ // Delete cells that would be deleted by any nested rule.
+ Union union = 4;
+ }
+}
diff --git a/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto
new file mode 100644
index 0000000000..6962862776
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service.proto
@@ -0,0 +1,80 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.table.v1;
+
+import "google/api/annotations.proto";
+import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
+import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableServicesProto";
+option java_package = "com.google.bigtable.admin.table.v1";
+
+
+// Service for creating, configuring, and deleting Cloud Bigtable tables.
+// Provides access to the table schemas only, not the data stored within the tables.
+service BigtableTableService {
+ // Creates a new table, to be served from a specified cluster.
+ // The table can be created with a full set of initial column families,
+ // specified in the request.
+ rpc CreateTable(CreateTableRequest) returns (Table) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" };
+ }
+
+ // Lists the names of all tables served from a specified cluster.
+ rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" };
+ }
+
+ // Gets the schema of the specified table, including its column families.
+ rpc GetTable(GetTableRequest) returns (Table) {
+ option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
+ }
+
+ // Permanently deletes a specified table and all of its data.
+ rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
+ }
+
+ // Changes the name of a specified table.
+ // Cannot be used to move tables between clusters, zones, or projects.
+ rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" };
+ }
+
+ // Creates a new column family within a specified table.
+ rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) {
+ option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" };
+ }
+
+ // Changes the configuration of a specified column family.
+ rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) {
+ option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" };
+ }
+
+ // Permanently deletes a specified column family and all of its data.
+ rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" };
+ }
+
+ // Delete all rows in a table corresponding to a particular prefix
+ rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" };
+ }
+}
diff --git a/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto
new file mode 100644
index 0000000000..7374dc9d8b
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto
@@ -0,0 +1,116 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.table.v1;
+
+import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableServiceMessagesProto";
+option java_package = "com.google.bigtable.admin.table.v1";
+
+
+message CreateTableRequest {
+ // The unique name of the cluster in which to create the new table.
+ string name = 1;
+
+ // The name by which the new table should be referred to within the cluster,
+ // e.g. "foobar" rather than "<cluster_name>/tables/foobar".
+ string table_id = 2;
+
+ // The Table to create. The `name` field of the Table and all of its
+ // ColumnFamilies must be left blank, and will be populated in the response.
+ Table table = 3;
+
+ // The optional list of row keys that will be used to initially split the
+ // table into several tablets (Tablets are similar to HBase regions).
+ // Given two split keys, "s1" and "s2", three tablets will be created,
+ // spanning the key ranges: [, s1), [s1, s2), [s2, ).
+ //
+ // Example:
+ // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2",
+ // "other", "zz"]
+ // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"]
+ // * Key assignment:
+ // - Tablet 1 [, apple) => {"a"}.
+ // - Tablet 2 [apple, customer_1) => {"apple", "custom"}.
+ // - Tablet 3 [customer_1, customer_2) => {"customer_1"}.
+ // - Tablet 4 [customer_2, other) => {"customer_2"}.
+ // - Tablet 5 [other, ) => {"other", "zz"}.
+ repeated string initial_split_keys = 4;
+}
+
+message ListTablesRequest {
+ // The unique name of the cluster for which tables should be listed.
+ string name = 1;
+}
+
+message ListTablesResponse {
+ // The tables present in the requested cluster.
+ // At present, only the names of the tables are populated.
+ repeated Table tables = 1;
+}
+
+message GetTableRequest {
+ // The unique name of the requested table.
+ string name = 1;
+}
+
+message DeleteTableRequest {
+ // The unique name of the table to be deleted.
+ string name = 1;
+}
+
+message RenameTableRequest {
+ // The current unique name of the table.
+ string name = 1;
+
+ // The new name by which the table should be referred to within its containing
+ // cluster, e.g. "foobar" rather than "<cluster_name>/tables/foobar".
+ string new_id = 2;
+}
+
+message CreateColumnFamilyRequest {
+ // The unique name of the table in which to create the new column family.
+ string name = 1;
+
+ // The name by which the new column family should be referred to within the
+ // table, e.g. "foobar" rather than "<table_name>/columnFamilies/foobar".
+ string column_family_id = 2;
+
+ // The column family to create. The `name` field must be left blank.
+ ColumnFamily column_family = 3;
+}
+
+message DeleteColumnFamilyRequest {
+ // The unique name of the column family to be deleted.
+ string name = 1;
+}
+
+message BulkDeleteRowsRequest {
+ // The unique name of the table on which to perform the bulk delete
+ string table_name = 1;
+
+ oneof target {
+ // Delete all rows that start with this row key prefix. Prefix cannot be
+ // zero length.
+ bytes row_key_prefix = 2;
+
+ // Delete all rows in the table. Setting this to false is a no-op.
+ bool delete_all_data_from_table = 3;
+ }
+}
diff --git a/third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto b/third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto
new file mode 100644
index 0000000000..a4883bfcfa
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/bigtable_instance_admin.proto
@@ -0,0 +1,233 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/bigtable/admin/v2/instance.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableInstanceAdminProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// Service for creating, configuring, and deleting Cloud Bigtable Instances and
+// Clusters. Provides access to the Instance and Cluster schemas only, not the
+// tables' metadata or data stored in those tables.
+service BigtableInstanceAdmin {
+ // Create an instance within a project.
+ rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" };
+ }
+
+ // Gets information about an instance.
+ rpc GetInstance(GetInstanceRequest) returns (Instance) {
+ option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" };
+ }
+
+ // Lists information about instances in a project.
+ rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" };
+ }
+
+ // Updates an instance within a project.
+ rpc UpdateInstance(Instance) returns (Instance) {
+ option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" };
+ }
+
+ // Delete an instance from a project.
+ rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" };
+ }
+
+ // Creates a cluster within an instance.
+ rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" };
+ }
+
+ // Gets information about a cluster.
+ rpc GetCluster(GetClusterRequest) returns (Cluster) {
+ option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" };
+ }
+
+ // Lists information about clusters in an instance.
+ rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" };
+ }
+
+ // Updates a cluster within an instance.
+ rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) {
+ option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" };
+ }
+
+ // Deletes a cluster from an instance.
+ rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" };
+ }
+}
+
+// Request message for BigtableInstanceAdmin.CreateInstance.
+message CreateInstanceRequest {
+ // The unique name of the project in which to create the new instance.
+ // Values are of the form `projects/<project>`.
+ string parent = 1;
+
+ // The ID to be used when referring to the new instance within its project,
+ // e.g., just `myinstance` rather than
+ // `projects/myproject/instances/myinstance`.
+ string instance_id = 2;
+
+ // The instance to create.
+ // Fields marked `OutputOnly` must be left blank.
+ Instance instance = 3;
+
+ // The clusters to be created within the instance, mapped by desired
+ // cluster ID, e.g., just `mycluster` rather than
+ // `projects/myproject/instances/myinstance/clusters/mycluster`.
+ // Fields marked `OutputOnly` must be left blank.
+ // Currently exactly one cluster must be specified.
+ map<string, Cluster> clusters = 4;
+}
+
+// Request message for BigtableInstanceAdmin.GetInstance.
+message GetInstanceRequest {
+ // The unique name of the requested instance. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string name = 1;
+}
+
+// Request message for BigtableInstanceAdmin.ListInstances.
+message ListInstancesRequest {
+ // The unique name of the project for which a list of instances is requested.
+ // Values are of the form `projects/<project>`.
+ string parent = 1;
+
+ // The value of `next_page_token` returned by a previous call.
+ string page_token = 2;
+}
+
+// Response message for BigtableInstanceAdmin.ListInstances.
+message ListInstancesResponse {
+ // The list of requested instances.
+ repeated Instance instances = 1;
+
+ // Locations from which Instance information could not be retrieved,
+ // due to an outage or some other transient condition.
+ // Instances whose Clusters are all in one of the failed locations
+ // may be missing from `instances`, and Instances with at least one
+ // Cluster in a failed location may only have partial information returned.
+ repeated string failed_locations = 2;
+
+ // Set if not all instances could be returned in a single response.
+ // Pass this value to `page_token` in another request to get the next
+ // page of results.
+ string next_page_token = 3;
+}
+
+// Request message for BigtableInstanceAdmin.DeleteInstance.
+message DeleteInstanceRequest {
+ // The unique name of the instance to be deleted.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string name = 1;
+}
+
+// Request message for BigtableInstanceAdmin.CreateCluster.
+message CreateClusterRequest {
+ // The unique name of the instance in which to create the new cluster.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // The ID to be used when referring to the new cluster within its instance,
+ // e.g., just `mycluster` rather than
+ // `projects/myproject/instances/myinstance/clusters/mycluster`.
+ string cluster_id = 2;
+
+ // The cluster to be created.
+ // Fields marked `OutputOnly` must be left blank.
+ Cluster cluster = 3;
+}
+
+// Request message for BigtableInstanceAdmin.GetCluster.
+message GetClusterRequest {
+ // The unique name of the requested cluster. Values are of the form
+ // `projects/<project>/instances/<instance>/clusters/<cluster>`.
+ string name = 1;
+}
+
+// Request message for BigtableInstanceAdmin.ListClusters.
+message ListClustersRequest {
+ // The unique name of the instance for which a list of clusters is requested.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ // Use `<instance> = '-'` to list Clusters for all Instances in a project,
+ // e.g., `projects/myproject/instances/-`.
+ string parent = 1;
+
+ // The value of `next_page_token` returned by a previous call.
+ string page_token = 2;
+}
+
+// Response message for BigtableInstanceAdmin.ListClusters.
+message ListClustersResponse {
+ // The list of requested clusters.
+ repeated Cluster clusters = 1;
+
+ // Locations from which Cluster information could not be retrieved,
+ // due to an outage or some other transient condition.
+ // Clusters from these locations may be missing from `clusters`,
+ // or may only have partial information returned.
+ repeated string failed_locations = 2;
+
+ // Set if not all clusters could be returned in a single response.
+ // Pass this value to `page_token` in another request to get the next
+ // page of results.
+ string next_page_token = 3;
+}
+
+// Request message for BigtableInstanceAdmin.DeleteCluster.
+message DeleteClusterRequest {
+ // The unique name of the cluster to be deleted. Values are of the form
+ // `projects/<project>/instances/<instance>/clusters/<cluster>`.
+ string name = 1;
+}
+
+// The metadata for the Operation returned by CreateInstance.
+message CreateInstanceMetadata {
+ // The request that prompted the initiation of this CreateInstance operation.
+ CreateInstanceRequest original_request = 1;
+
+ // The time at which the original request was received.
+ google.protobuf.Timestamp request_time = 2;
+
+ // The time at which the operation failed or was completed successfully.
+ google.protobuf.Timestamp finish_time = 3;
+}
+
+// The metadata for the Operation returned by UpdateCluster.
+message UpdateClusterMetadata {
+ // The request that prompted the initiation of this UpdateCluster operation.
+ Cluster original_request = 1;
+
+ // The time at which the original request was received.
+ google.protobuf.Timestamp request_time = 2;
+
+ // The time at which the operation failed or was completed successfully.
+ google.protobuf.Timestamp finish_time = 3;
+}
diff --git a/third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto b/third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto
new file mode 100644
index 0000000000..a8500cd439
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/bigtable_table_admin.proto
@@ -0,0 +1,214 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/api/auth.proto";
+import "google/bigtable/admin/v2/table.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableTableAdminProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// Service for creating, configuring, and deleting Cloud Bigtable tables.
+// Provides access to the table schemas only, not the data stored within
+// the tables.
+service BigtableTableAdmin {
+ // Creates a new table in the specified instance.
+ // The table can be created with a full set of initial column families,
+ // specified in the request.
+ rpc CreateTable(CreateTableRequest) returns (Table) {
+ option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" };
+ }
+
+ // Lists all tables served from a specified instance.
+ rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
+ option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" };
+ }
+
+ // Gets metadata information about the specified table.
+ rpc GetTable(GetTableRequest) returns (Table) {
+ option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" };
+ }
+
+ // Permanently deletes a specified table and all of its data.
+ rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" };
+ }
+
+ // Atomically performs a series of column family modifications
+ // on the specified table.
+ rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) {
+ option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" };
+ }
+
+ // Permanently drop/delete a row range from a specified table. The request can
+ // specify whether to delete all rows in a table, or only those that match a
+ // particular prefix.
+ rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" };
+ }
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
+message CreateTableRequest {
+ // An initial split point for a newly created table.
+ message Split {
+ // Row key to use as an initial tablet boundary.
+ bytes key = 1;
+ }
+
+ // The unique name of the instance in which to create the table.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // The name by which the new table should be referred to within the parent
+ // instance, e.g., `foobar` rather than `<parent>/tables/foobar`.
+ string table_id = 2;
+
+ // The Table to create.
+ Table table = 3;
+
+ // The optional list of row keys that will be used to initially split the
+ // table into several tablets (tablets are similar to HBase regions).
+ // Given two split keys, `s1` and `s2`, three tablets will be created,
+ // spanning the key ranges: `[, s1), [s1, s2), [s2, )`.
+ //
+ // Example:
+ //
+ // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",`
+ // `"other", "zz"]`
+ // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]`
+ // * Key assignment:
+ // - Tablet 1 `[, apple) => {"a"}.`
+ // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.`
+ // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.`
+ // - Tablet 4 `[customer_2, other) => {"customer_2"}.`
+ // - Tablet 5 `[other, ) => {"other", "zz"}.`
+ repeated Split initial_splits = 4;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
+message DropRowRangeRequest {
+ // The unique name of the table on which to drop a range of rows.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+
+ // Delete all rows or by prefix.
+ oneof target {
+ // Delete all rows that start with this row key prefix. Prefix cannot be
+ // zero length.
+ bytes row_key_prefix = 2;
+
+ // Delete all rows in the table. Setting this to false is a no-op.
+ bool delete_all_data_from_table = 3;
+ }
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
+message ListTablesRequest {
+ // The unique name of the instance for which tables should be listed.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1;
+
+ // The view to be applied to the returned tables' fields.
+ // Defaults to `NAME_ONLY` if unspecified; no others are currently supported.
+ Table.View view = 2;
+
+ // The value of `next_page_token` returned by a previous call.
+ string page_token = 3;
+}
+
+// Response message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
+message ListTablesResponse {
+ // The tables present in the requested instance.
+ repeated Table tables = 1;
+
+ // Set if not all tables could be returned in a single response.
+ // Pass this value to `page_token` in another request to get the next
+ // page of results.
+ string next_page_token = 2;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable]
+message GetTableRequest {
+ // The unique name of the requested table.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+
+ // The view to be applied to the returned table's fields.
+ // Defaults to `SCHEMA_ONLY` if unspecified.
+ Table.View view = 2;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable]
+message DeleteTableRequest {
+ // The unique name of the table to be deleted.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+}
+
+// Request message for
+// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies]
+message ModifyColumnFamiliesRequest {
+ // A create, update, or delete of a particular column family.
+ message Modification {
+ // The ID of the column family to be modified.
+ string id = 1;
+
+    // Column family modifications.
+ oneof mod {
+ // Create a new column family with the specified schema, or fail if
+ // one already exists with the given ID.
+ ColumnFamily create = 2;
+
+ // Update an existing column family to the specified schema, or fail
+ // if no column family exists with the given ID.
+ ColumnFamily update = 3;
+
+ // Drop (delete) the column family with the given ID, or fail if no such
+ // family exists.
+ bool drop = 4;
+ }
+ }
+
+ // The unique name of the table whose families should be modified.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/tables/<table>`.
+ string name = 1;
+
+ // Modifications to be atomically applied to the specified table's families.
+ // Entries are applied in order, meaning that earlier modifications can be
+ // masked by later ones (in the case of repeated updates to the same family,
+ // for example).
+ repeated Modification modifications = 2;
+}
diff --git a/third_party/googleapis/google/bigtable/admin/v2/common.proto b/third_party/googleapis/google/bigtable/admin/v2/common.proto
new file mode 100644
index 0000000000..c6e2bb223b
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/common.proto
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "CommonProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// Storage media types for persisting Bigtable data.
+enum StorageType {
+ // The user did not specify a storage type.
+ STORAGE_TYPE_UNSPECIFIED = 0;
+
+ // Flash (SSD) storage should be used.
+ SSD = 1;
+
+ // Magnetic drive (HDD) storage should be used.
+ HDD = 2;
+}
diff --git a/third_party/googleapis/google/bigtable/admin/v2/instance.proto b/third_party/googleapis/google/bigtable/admin/v2/instance.proto
new file mode 100644
index 0000000000..67921d6e15
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/instance.proto
@@ -0,0 +1,130 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/bigtable/admin/v2/common.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "InstanceProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
+// the resources that serve them.
+// All tables in an instance are served from a single
+// [Cluster][google.bigtable.admin.v2.Cluster].
+message Instance {
+ // Possible states of an instance.
+ enum State {
+ // The state of the instance could not be determined.
+ STATE_NOT_KNOWN = 0;
+
+ // The instance has been successfully created and can serve requests
+ // to its tables.
+ READY = 1;
+
+ // The instance is currently being created, and may be destroyed
+ // if the creation process encounters an error.
+ CREATING = 2;
+ }
+
+ // The type of the instance.
+ enum Type {
+ // The type of the instance is unspecified. If set when creating an
+ // instance, a `PRODUCTION` instance will be created. If set when updating
+ // an instance, the type will be left unchanged.
+ TYPE_UNSPECIFIED = 0;
+
+ // An instance meant for production use. `serve_nodes` must be set
+ // on the cluster.
+ PRODUCTION = 1;
+ }
+
+ // (`OutputOnly`)
+ // The unique name of the instance. Values are of the form
+ // `projects/<project>/instances/[a-z][a-z0-9\\-]+[a-z0-9]`.
+ string name = 1;
+
+ // The descriptive name for this instance as it appears in UIs.
+ // Can be changed at any time, but should be kept globally unique
+ // to avoid confusion.
+ string display_name = 2;
+
+ // (`OutputOnly`)
+ // The current state of the instance.
+ State state = 3;
+
+ // The type of the instance. Defaults to `PRODUCTION`.
+ Type type = 4;
+}
+
+// A resizable group of nodes in a particular cloud location, capable
+// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent
+// [Instance][google.bigtable.admin.v2.Instance].
+message Cluster {
+ // Possible states of a cluster.
+ enum State {
+ // The state of the cluster could not be determined.
+ STATE_NOT_KNOWN = 0;
+
+ // The cluster has been successfully created and is ready to serve requests.
+ READY = 1;
+
+ // The cluster is currently being created, and may be destroyed
+ // if the creation process encounters an error.
+ // A cluster may not be able to serve requests while being created.
+ CREATING = 2;
+
+ // The cluster is currently being resized, and may revert to its previous
+ // node count if the process encounters an error.
+ // A cluster is still capable of serving requests while being resized,
+ // but may exhibit performance as if its number of allocated nodes is
+ // between the starting and requested states.
+ RESIZING = 3;
+
+ // The cluster has no backing nodes. The data (tables) still
+ // exist, but no operations can be performed on the cluster.
+ DISABLED = 4;
+ }
+
+ // (`OutputOnly`)
+ // The unique name of the cluster. Values are of the form
+ // `projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*`.
+ string name = 1;
+
+ // (`CreationOnly`)
+ // The location where this cluster's nodes and storage reside. For best
+ // performance, clients should be located as close as possible to this cluster.
+ // Currently only zones are supported, so values should be of the form
+ // `projects/<project>/locations/<zone>`.
+ string location = 2;
+
+ // (`OutputOnly`)
+ // The current state of the cluster.
+ State state = 3;
+
+ // The number of nodes allocated to this cluster. More nodes enable higher
+ // throughput and more consistent performance.
+ int32 serve_nodes = 4;
+
+ // (`CreationOnly`)
+ // The type of storage used by this cluster to serve its
+ // parent instance's tables, unless explicitly overridden.
+ StorageType default_storage_type = 5;
+}
diff --git a/third_party/googleapis/google/bigtable/admin/v2/table.proto b/third_party/googleapis/google/bigtable/admin/v2/table.proto
new file mode 100644
index 0000000000..ce80571f0f
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/admin/v2/table.proto
@@ -0,0 +1,118 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.v2;
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
+option java_multiple_files = true;
+option java_outer_classname = "TableProto";
+option java_package = "com.google.bigtable.admin.v2";
+
+
+// A collection of user data indexed by row, column, and timestamp.
+// Each table is served using the resources of its parent cluster.
+message Table {
+ // Possible timestamp granularities to use when keeping multiple versions
+ // of data in a table.
+ enum TimestampGranularity {
+ // The user did not specify a granularity. Should not be returned.
+ // When specified during table creation, MILLIS will be used.
+ TIMESTAMP_GRANULARITY_UNSPECIFIED = 0;
+
+ // The table keeps data versioned at a granularity of 1ms.
+ MILLIS = 1;
+ }
+
+ // Defines a view over a table's fields.
+ enum View {
+ // Uses the default view for each method as documented in its request.
+ VIEW_UNSPECIFIED = 0;
+
+ // Only populates `name`.
+ NAME_ONLY = 1;
+
+ // Only populates `name` and fields related to the table's schema.
+ SCHEMA_VIEW = 2;
+
+ // Populates all fields.
+ FULL = 4;
+ }
+
+ // (`OutputOnly`)
+ // The unique name of the table. Values are of the form
+ // `projects/<project>/instances/<instance>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
+ // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `FULL`
+ string name = 1;
+
+ // (`CreationOnly`)
+ // The column families configured for this table, mapped by column family ID.
+ // Views: `SCHEMA_VIEW`, `FULL`
+ map<string, ColumnFamily> column_families = 3;
+
+ // (`CreationOnly`)
+ // The granularity (e.g. `MILLIS`, `MICROS`) at which timestamps are stored in
+ // this table. Timestamps not matching the granularity will be rejected.
+ // If unspecified at creation time, the value will be set to `MILLIS`.
+ // Views: `SCHEMA_VIEW`, `FULL`
+ TimestampGranularity granularity = 4;
+}
+
+// A set of columns within a table which share a common configuration.
+message ColumnFamily {
+ // Garbage collection rule specified as a protobuf.
+ // Must serialize to at most 500 bytes.
+ //
+ // NOTE: Garbage collection executes opportunistically in the background, and
+ // so it's possible for reads to return a cell even if it matches the active
+ // GC expression for its family.
+ GcRule gc_rule = 1;
+}
+
+// Rule for determining which cells to delete during garbage collection.
+message GcRule {
+ // A GcRule which deletes cells matching all of the given rules.
+ message Intersection {
+ // Only delete cells which would be deleted by every element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ // A GcRule which deletes cells matching any of the given rules.
+ message Union {
+ // Delete cells which would be deleted by any element of `rules`.
+ repeated GcRule rules = 1;
+ }
+
+ // Garbage collection rules.
+ oneof rule {
+ // Delete all cells in a column except the most recent N.
+ int32 max_num_versions = 1;
+
+ // Delete cells in a column older than the given age.
+ // Values must be at least one millisecond, and will be truncated to
+ // microsecond granularity.
+ google.protobuf.Duration max_age = 2;
+
+ // Delete cells that would be deleted by every nested rule.
+ Intersection intersection = 3;
+
+ // Delete cells that would be deleted by any nested rule.
+ Union union = 4;
+ }
+}
diff --git a/third_party/googleapis/google/bigtable/bigtable.yaml b/third_party/googleapis/google/bigtable/bigtable.yaml
new file mode 100644
index 0000000000..f0ce19f90d
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/bigtable.yaml
@@ -0,0 +1,33 @@
+# Google Bigtable API service configuration
+
+type: google.api.Service
+config_version: 0
+name: bigtable.googleapis.com
+title: Google Cloud Bigtable API
+
+documentation:
+ summary:
+ Google Cloud Bigtable - http://cloud.google.com/bigtable/
+
+apis:
+- name: google.bigtable.v2.Bigtable
+
+authentication:
+ rules:
+ # Unless explicitly weakened, all ops require write access
+ - selector: '*'
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.data,
+ https://www.googleapis.com/auth/cloud-bigtable.data,
+ https://www.googleapis.com/auth/cloud-platform
+
+ # Ops which only require read access
+ - selector: google.bigtable.v2.Bigtable.ReadRows,
+ google.bigtable.v2.Bigtable.SampleRowKeys
+ oauth:
+ canonical_scopes: https://www.googleapis.com/auth/bigtable.data,
+ https://www.googleapis.com/auth/bigtable.data.readonly,
+ https://www.googleapis.com/auth/cloud-bigtable.data,
+ https://www.googleapis.com/auth/cloud-bigtable.data.readonly,
+ https://www.googleapis.com/auth/cloud-platform,
+ https://www.googleapis.com/auth/cloud-platform.read-only
diff --git a/third_party/googleapis/google/bigtable/v1/bigtable_data.proto b/third_party/googleapis/google/bigtable/v1/bigtable_data.proto
new file mode 100644
index 0000000000..fe6518ad20
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v1/bigtable_data.proto
@@ -0,0 +1,516 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v1;
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableDataProto";
+option java_package = "com.google.bigtable.v1";
+
+
+// Specifies the complete (requested) contents of a single row of a table.
+// Rows which exceed 256MiB in size cannot be read in full.
+message Row {
+ // The unique key which identifies this row within its table. This is the same
+ // key that's used to identify the row in, for example, a MutateRowRequest.
+ // May contain any non-empty byte string up to 4KiB in length.
+ bytes key = 1;
+
+ // May be empty, but only if the entire row is empty.
+ // The mutual ordering of column families is not specified.
+ repeated Family families = 2;
+}
+
+// Specifies (some of) the contents of a single row/column family of a table.
+message Family {
+ // The unique key which identifies this family within its row. This is the
+ // same key that's used to identify the family in, for example, a RowFilter
+ // which sets its "family_name_regex_filter" field.
+ // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may
+ // produce cells in a sentinel family with an empty name.
+ // Must be no greater than 64 characters in length.
+ string name = 1;
+
+ // Must not be empty. Sorted in order of increasing "qualifier".
+ repeated Column columns = 2;
+}
+
+// Specifies (some of) the contents of a single row/column of a table.
+message Column {
+ // The unique key which identifies this column within its family. This is the
+ // same key that's used to identify the column in, for example, a RowFilter
+ // which sets its "column_qualifier_regex_filter" field.
+ // May contain any byte string, including the empty string, up to 16kiB in
+ // length.
+ bytes qualifier = 1;
+
+ // Must not be empty. Sorted in order of decreasing "timestamp_micros".
+ repeated Cell cells = 2;
+}
+
+// Specifies (some of) the contents of a single row/column/timestamp of a table.
+message Cell {
+ // The cell's stored timestamp, which also uniquely identifies it within
+ // its column.
+ // Values are always expressed in microseconds, but individual tables may set
+ // a coarser "granularity" to further restrict the allowed values. For
+ // example, a table which specifies millisecond granularity will only allow
+ // values of "timestamp_micros" which are multiples of 1000.
+ int64 timestamp_micros = 1;
+
+ // The value stored in the cell.
+ // May contain any byte string, including the empty string, up to 100MiB in
+ // length.
+ bytes value = 2;
+
+ // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
+ repeated string labels = 3;
+}
+
+// Specifies a contiguous range of rows.
+message RowRange {
+ // Inclusive lower bound. If left empty, interpreted as the empty string.
+ bytes start_key = 2;
+
+ // Exclusive upper bound. If left empty, interpreted as infinity.
+ bytes end_key = 3;
+}
+
+// Specifies a non-contiguous set of rows.
+message RowSet {
+ // Single rows included in the set.
+ repeated bytes row_keys = 1;
+
+ // Contiguous row ranges included in the set.
+ repeated RowRange row_ranges = 2;
+}
+
+// Specifies a contiguous range of columns within a single column family.
+// The range spans from <column_family>:<start_qualifier> to
+// <column_family>:<end_qualifier>, where both bounds can be either inclusive or
+// exclusive.
+message ColumnRange {
+ // The name of the column family within which this range falls.
+ string family_name = 1;
+
+ // The column qualifier at which to start the range (within 'column_family').
+ // If neither field is set, interpreted as the empty string, inclusive.
+ oneof start_qualifier {
+ // Used when giving an inclusive lower bound for the range.
+ bytes start_qualifier_inclusive = 2;
+
+ // Used when giving an exclusive lower bound for the range.
+ bytes start_qualifier_exclusive = 3;
+ }
+
+ // The column qualifier at which to end the range (within 'column_family').
+ // If neither field is set, interpreted as the infinite string, exclusive.
+ oneof end_qualifier {
+ // Used when giving an inclusive upper bound for the range.
+ bytes end_qualifier_inclusive = 4;
+
+ // Used when giving an exclusive upper bound for the range.
+ bytes end_qualifier_exclusive = 5;
+ }
+}
+
+// Specifies a contiguous range of microsecond timestamps.
+message TimestampRange {
+ // Inclusive lower bound. If left empty, interpreted as 0.
+ int64 start_timestamp_micros = 1;
+
+ // Exclusive upper bound. If left empty, interpreted as infinity.
+ int64 end_timestamp_micros = 2;
+}
+
+// Specifies a contiguous range of raw byte values.
+message ValueRange {
+ // The value at which to start the range.
+ // If neither field is set, interpreted as the empty string, inclusive.
+ oneof start_value {
+ // Used when giving an inclusive lower bound for the range.
+ bytes start_value_inclusive = 1;
+
+ // Used when giving an exclusive lower bound for the range.
+ bytes start_value_exclusive = 2;
+ }
+
+ // The value at which to end the range.
+ // If neither field is set, interpreted as the infinite string, exclusive.
+ oneof end_value {
+ // Used when giving an inclusive upper bound for the range.
+ bytes end_value_inclusive = 3;
+
+ // Used when giving an exclusive upper bound for the range.
+ bytes end_value_exclusive = 4;
+ }
+}
+
+// Takes a row as input and produces an alternate view of the row based on
+// specified rules. For example, a RowFilter might trim down a row to include
+// just the cells from columns matching a given regular expression, or might
+// return all the cells of a row but not their values. More complicated filters
+// can be composed out of these components to express requests such as, "within
+// every column of a particular family, give just the two most recent cells
+// which are older than timestamp X."
+//
+// There are two broad categories of RowFilters (true filters and transformers),
+// as well as two ways to compose simple filters into more complex ones
+// (chains and interleaves). They work as follows:
+//
+// * True filters alter the input row by excluding some of its cells wholesale
+// from the output row. An example of a true filter is the "value_regex_filter",
+// which excludes cells whose values don't match the specified pattern. All
+// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
+// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
+// important point to keep in mind is that RE2(.) is equivalent by default to
+// RE2([^\n]), meaning that it does not match newlines. When attempting to match
+// an arbitrary byte, you should therefore use the escape sequence '\C', which
+// may need to be further escaped as '\\C' in your client language.
+//
+// * Transformers alter the input row by changing the values of some of its
+// cells in the output, without excluding them completely. Currently, the only
+// supported transformer is the "strip_value_transformer", which replaces every
+// cell's value with the empty string.
+//
+// * Chains and interleaves are described in more detail in the
+// RowFilter.Chain and RowFilter.Interleave documentation.
+//
+// The total serialized size of a RowFilter message must not
+// exceed 4096 bytes, and RowFilters may not be nested within each other
+// (in Chains or Interleaves) to a depth of more than 20.
+message RowFilter {
+ // A RowFilter which sends rows through several RowFilters in sequence.
+ message Chain {
+ // The elements of "filters" are chained together to process the input row:
+ // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
+ // The full chain is executed atomically.
+ repeated RowFilter filters = 1;
+ }
+
+ // A RowFilter which sends each row to each of several component
+ // RowFilters and interleaves the results.
+ message Interleave {
+ // The elements of "filters" all process a copy of the input row, and the
+ // results are pooled, sorted, and combined into a single output row.
+ // If multiple cells are produced with the same column and timestamp,
+ // they will all appear in the output row in an unspecified mutual order.
+ // Consider the following example, with three filters:
+ //
+ // input row
+ // |
+ // -----------------------------------------------------
+ // | | |
+ // f(0) f(1) f(2)
+ // | | |
+ // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a
+ // 2: foo,blah,11,z far,blah,5,x far,blah,5,x
+ // | | |
+ // -----------------------------------------------------
+ // |
+ // 1: foo,bar,10,z // could have switched with #2
+ // 2: foo,bar,10,x // could have switched with #1
+ // 3: foo,blah,11,z
+ // 4: far,bar,7,a
+ // 5: far,blah,5,x // identical to #6
+ // 6: far,blah,5,x // identical to #5
+ // All interleaved filters are executed atomically.
+ repeated RowFilter filters = 1;
+ }
+
+ // A RowFilter which evaluates one of two possible RowFilters, depending on
+ // whether or not a predicate RowFilter outputs any cells from the input row.
+ //
+ // IMPORTANT NOTE: The predicate filter does not execute atomically with the
+ // true and false filters, which may lead to inconsistent or unexpected
+ // results. Additionally, Condition filters have poor performance, especially
+ // when filters are set for the false condition.
+ message Condition {
+ // If "predicate_filter" outputs any cells, then "true_filter" will be
+ // evaluated on the input row. Otherwise, "false_filter" will be evaluated.
+ RowFilter predicate_filter = 1;
+
+ // The filter to apply to the input row if "predicate_filter" returns any
+ // results. If not provided, no results will be returned in the true case.
+ RowFilter true_filter = 2;
+
+ // The filter to apply to the input row if "predicate_filter" does not
+ // return any results. If not provided, no results will be returned in the
+ // false case.
+ RowFilter false_filter = 3;
+ }
+
+ // Which of the possible RowFilter types to apply. If none are set, this
+ // RowFilter returns all cells in the input row.
+ oneof filter {
+ // Applies several RowFilters to the data in sequence, progressively
+ // narrowing the results.
+ Chain chain = 1;
+
+ // Applies several RowFilters to the data in parallel and combines the
+ // results.
+ Interleave interleave = 2;
+
+ // Applies one of two possible RowFilters to the data based on the output of
+ // a predicate RowFilter.
+ Condition condition = 3;
+
+ // ADVANCED USE ONLY.
+ // Hook for introspection into the RowFilter. Outputs all cells directly to
+ // the output of the read rather than to any parent filter. Consider the
+ // following example:
+ //
+ // Chain(
+ // FamilyRegex("A"),
+ // Interleave(
+ // All(),
+ // Chain(Label("foo"), Sink())
+ // ),
+ // QualifierRegex("B")
+ // )
+ //
+ // A,A,1,w
+ // A,B,2,x
+ // B,B,4,z
+ // |
+ // FamilyRegex("A")
+ // |
+ // A,A,1,w
+ // A,B,2,x
+ // |
+ // +------------+-------------+
+ // | |
+ // All() Label(foo)
+ // | |
+ // A,A,1,w A,A,1,w,labels:[foo]
+ // A,B,2,x A,B,2,x,labels:[foo]
+ // | |
+ // | Sink() --------------+
+ // | | |
+ // +------------+ x------+ A,A,1,w,labels:[foo]
+ // | A,B,2,x,labels:[foo]
+ // A,A,1,w |
+ // A,B,2,x |
+ // | |
+ // QualifierRegex("B") |
+ // | |
+ // A,B,2,x |
+ // | |
+ // +--------------------------------+
+ // |
+ // A,A,1,w,labels:[foo]
+ // A,B,2,x,labels:[foo] // could be switched
+ // A,B,2,x // could be switched
+ //
+ // Despite being excluded by the qualifier filter, a copy of every cell
+ // that reaches the sink is present in the final result.
+ //
+ // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave],
+ // duplicate cells are possible, and appear in an unspecified mutual order.
+ // In this case we have a duplicate with column "A:B" and timestamp 2,
+ // because one copy passed through the all filter while the other was
+ // passed through the label and sink. Note that one copy has label "foo",
+ // while the other does not.
+ //
+ // Cannot be used within the `predicate_filter`, `true_filter`, or
+ // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition].
+ bool sink = 16;
+
+ // Matches all cells, regardless of input. Functionally equivalent to
+ // leaving `filter` unset, but included for completeness.
+ bool pass_all_filter = 17;
+
+ // Does not match any cells, regardless of input. Useful for temporarily
+ // disabling just part of a filter.
+ bool block_all_filter = 18;
+
+ // Matches only cells from rows whose keys satisfy the given RE2 regex. In
+ // other words, passes through the entire row when the key matches, and
+ // otherwise produces an empty row.
+ // Note that, since row keys can contain arbitrary bytes, the '\C' escape
+ // sequence must be used if a true wildcard is desired. The '.' character
+ // will not match the new line character '\n', which may be present in a
+ // binary key.
+ bytes row_key_regex_filter = 4;
+
+ // Matches all cells from a row with probability p, and matches no cells
+ // from the row with probability 1-p.
+ double row_sample_filter = 14;
+
+ // Matches only cells from columns whose families satisfy the given RE2
+ // regex. For technical reasons, the regex must not contain the ':'
+ // character, even if it is not being used as a literal.
+ // Note that, since column families cannot contain the new line character
+ // '\n', it is sufficient to use '.' as a full wildcard when matching
+ // column family names.
+ string family_name_regex_filter = 5;
+
+ // Matches only cells from columns whose qualifiers satisfy the given RE2
+ // regex.
+ // Note that, since column qualifiers can contain arbitrary bytes, the '\C'
+ // escape sequence must be used if a true wildcard is desired. The '.'
+ // character will not match the new line character '\n', which may be
+ // present in a binary qualifier.
+ bytes column_qualifier_regex_filter = 6;
+
+ // Matches only cells from columns within the given range.
+ ColumnRange column_range_filter = 7;
+
+ // Matches only cells with timestamps within the given range.
+ TimestampRange timestamp_range_filter = 8;
+
+ // Matches only cells with values that satisfy the given regular expression.
+ // Note that, since cell values can contain arbitrary bytes, the '\C' escape
+ // sequence must be used if a true wildcard is desired. The '.' character
+ // will not match the new line character '\n', which may be present in a
+ // binary value.
+ bytes value_regex_filter = 9;
+
+ // Matches only cells with values that fall within the given range.
+ ValueRange value_range_filter = 15;
+
+ // Skips the first N cells of each row, matching all subsequent cells.
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_row_offset_filter = 10;
+
+ // Matches only the first N cells of each row.
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_row_limit_filter = 11;
+
+ // Matches only the most recent N cells within each column. For example,
+ // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
+ // skip all earlier cells in "foo:bar", and then begin matching again in
+ // column "foo:bar2".
+ // If duplicate cells are present, as is possible when using an Interleave,
+ // each copy of the cell is counted separately.
+ int32 cells_per_column_limit_filter = 12;
+
+ // Replaces each cell's value with the empty string.
+ bool strip_value_transformer = 13;
+
+ // Applies the given label to all cells in the output row. This allows
+ // the client to determine which results were produced from which part of
+ // the filter.
+ //
+ // Values must be at most 15 characters in length, and match the RE2
+ // pattern [a-z0-9\\-]+
+ //
+ // Due to a technical limitation, it is not currently possible to apply
+ // multiple labels to a cell. As a result, a Chain may have no more than
+    // one sub-filter which contains an apply_label_transformer. It is okay for
+ // an Interleave to contain multiple apply_label_transformers, as they will
+ // be applied to separate copies of the input. This may be relaxed in the
+ // future.
+ string apply_label_transformer = 19;
+ }
+}
+
+// Specifies a particular change to be made to the contents of a row.
+message Mutation {
+  // A Mutation which sets the value of the specified cell.
+  message SetCell {
+    // The name of the family into which new data should be written.
+    // Must match [-_.a-zA-Z0-9]+
+    string family_name = 1;
+
+    // The qualifier of the column into which new data should be written.
+    // Can be any byte string, including the empty string.
+    bytes column_qualifier = 2;
+
+    // The timestamp of the cell into which new data should be written.
+    // Use -1 for current Bigtable server time.
+    // Otherwise, the client should set this value itself, noting that the
+    // default value is a timestamp of zero if the field is left unspecified.
+    // Values must match the "granularity" of the table (e.g. micros, millis).
+    int64 timestamp_micros = 3;
+
+    // The value to be written into the specified cell.
+    bytes value = 4;
+  }
+
+  // A Mutation which deletes cells from the specified column, optionally
+  // restricting the deletions to a given timestamp range.
+  message DeleteFromColumn {
+    // The name of the family from which cells should be deleted.
+    // Must match [-_.a-zA-Z0-9]+
+    string family_name = 1;
+
+    // The qualifier of the column from which cells should be deleted.
+    // Can be any byte string, including the empty string.
+    bytes column_qualifier = 2;
+
+    // The range of timestamps within which cells should be deleted.
+    TimestampRange time_range = 3;
+  }
+
+  // A Mutation which deletes all cells from the specified column family.
+  message DeleteFromFamily {
+    // The name of the family from which cells should be deleted.
+    // Must match [-_.a-zA-Z0-9]+
+    string family_name = 1;
+  }
+
+  // A Mutation which deletes all cells from the containing row.
+  message DeleteFromRow {
+    // Intentionally empty: the target row is identified by the enclosing
+    // request (e.g. MutateRowRequest.row_key), so no parameters are needed.
+  }
+
+  // Which of the possible Mutation types to apply.
+  // At most one field may be set, per proto3 `oneof` semantics.
+  oneof mutation {
+    // Set a cell's value.
+    SetCell set_cell = 1;
+
+    // Deletes cells from a column.
+    DeleteFromColumn delete_from_column = 2;
+
+    // Deletes cells from a column family.
+    DeleteFromFamily delete_from_family = 3;
+
+    // Deletes cells from the entire row.
+    DeleteFromRow delete_from_row = 4;
+  }
+}
+
+// Specifies an atomic read/modify/write operation on the latest value of the
+// specified column.
+message ReadModifyWriteRule {
+  // The name of the family to which the read/modify/write should be applied.
+  // Must match [-_.a-zA-Z0-9]+
+  string family_name = 1;
+
+  // The qualifier of the column to which the read/modify/write should be
+  // applied.
+  // Can be any byte string, including the empty string.
+  bytes column_qualifier = 2;
+
+  // The rule used to determine the column's new latest value from its current
+  // latest value. At most one rule may be set, per proto3 `oneof` semantics.
+  oneof rule {
+    // Rule specifying that "append_value" be appended to the existing value.
+    // If the targeted cell is unset, it will be treated as containing the
+    // empty string.
+    bytes append_value = 3;
+
+    // Rule specifying that "increment_amount" be added to the existing value.
+    // If the targeted cell is unset, it will be treated as containing a zero.
+    // Otherwise, the targeted cell must contain an 8-byte value (interpreted
+    // as a 64-bit big-endian signed integer), or the entire request will fail.
+    int64 increment_amount = 4;
+  }
+}
diff --git a/third_party/googleapis/google/bigtable/v1/bigtable_service.proto b/third_party/googleapis/google/bigtable/v1/bigtable_service.proto
new file mode 100644
index 0000000000..6d41a1b842
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v1/bigtable_service.proto
@@ -0,0 +1,74 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v1;
+
+import "google/api/annotations.proto";
+import "google/bigtable/v1/bigtable_data.proto";
+import "google/bigtable/v1/bigtable_service_messages.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
+option java_generic_services = true;
+option java_multiple_files = true;
+option java_outer_classname = "BigtableServicesProto";
+option java_package = "com.google.bigtable.v1";
+
+
+// Service for reading from and writing to existing Bigtables.
+service BigtableService {
+  // Streams back the contents of all requested rows, optionally applying
+  // the same Reader filter to each. Depending on their size, rows may be
+  // broken up across multiple responses, but atomicity of each row will still
+  // be preserved. (Server-streaming RPC.)
+  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+    option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" body: "*" };
+  }
+
+  // Returns a sample of row keys in the table. The returned row keys will
+  // delimit contiguous sections of the table of approximately equal size,
+  // which can be used to break up the data for distributed tasks like
+  // mapreduces. (Server-streaming RPC; one sample key per response.)
+  rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
+    option (google.api.http) = { get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" };
+  }
+
+  // Mutates a row atomically. Cells already present in the row are left
+  // unchanged unless explicitly changed by 'mutation'.
+  // Returns google.protobuf.Empty on success.
+  rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" };
+  }
+
+  // Mutates multiple rows in a batch. Each individual row is mutated
+  // atomically as in MutateRow, but the entire batch is not executed
+  // atomically. Per-entry results are reported in MutateRowsResponse.
+  rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
+    option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" };
+  }
+
+  // Mutates a row atomically based on the output of a predicate Reader filter.
+  rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
+    option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" };
+  }
+
+  // Modifies a row atomically, reading the latest existing timestamp/value from
+  // the specified columns and writing a new value at
+  // max(existing timestamp, current server time) based on pre-defined
+  // read/modify/write rules. Returns the new contents of all modified cells.
+  rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {
+    option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" body: "*" };
+  }
+}
diff --git a/third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto b/third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto
new file mode 100644
index 0000000000..6d75af78e1
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v1/bigtable_service_messages.proto
@@ -0,0 +1,218 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v1;
+
+import "google/bigtable/v1/bigtable_data.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableServiceMessagesProto";
+option java_package = "com.google.bigtable.v1";
+
+
+// Request message for BigtableService.ReadRows.
+message ReadRowsRequest {
+  // The unique name of the table from which to read.
+  string table_name = 1;
+
+  // If neither row_key nor row_range is set, reads from all rows.
+  oneof target {
+    // The key of a single row from which to read.
+    bytes row_key = 2;
+
+    // A range of rows from which to read.
+    RowRange row_range = 3;
+
+    // A set of rows from which to read. Entries need not be in order, and will
+    // be deduplicated before reading.
+    // The total serialized size of the set must not exceed 1MB.
+    RowSet row_set = 8;
+  }
+
+  // The filter to apply to the contents of the specified row(s). If unset,
+  // reads the entire table.
+  RowFilter filter = 5;
+
+  // By default, rows are read sequentially, producing results which are
+  // guaranteed to arrive in increasing row order. Setting
+  // "allow_row_interleaving" to true allows multiple rows to be interleaved in
+  // the response stream, which increases throughput but breaks this guarantee,
+  // and may force the client to use more memory to buffer partially-received
+  // rows. Cannot be set to true when specifying "num_rows_limit".
+  bool allow_row_interleaving = 6;
+
+  // The read will terminate after committing to N rows' worth of results. The
+  // default (zero) is to return all results.
+  // Note that "allow_row_interleaving" cannot be set to true when this is set.
+  int64 num_rows_limit = 7;
+}
+
+// Response message for BigtableService.ReadRows.
+message ReadRowsResponse {
+  // Specifies a piece of a row's contents returned as part of the read
+  // response stream.
+  message Chunk {
+    // The kind of content carried by this chunk. At most one is set, per
+    // proto3 `oneof` semantics.
+    oneof chunk {
+      // A subset of the data from a particular row. As long as no "reset_row"
+      // is received in between, multiple "row_contents" from the same row are
+      // from the same atomic view of that row, and will be received in the
+      // expected family/column/timestamp order.
+      Family row_contents = 1;
+
+      // Indicates that the client should drop all previous chunks for
+      // "row_key", as it will be re-read from the beginning.
+      bool reset_row = 2;
+
+      // Indicates that the client can safely process all previous chunks for
+      // "row_key", as its data has been fully read.
+      bool commit_row = 3;
+    }
+  }
+
+  // The key of the row for which we're receiving data.
+  // Results will be received in increasing row key order, unless
+  // "allow_row_interleaving" was specified in the request.
+  bytes row_key = 1;
+
+  // One or more chunks of the row specified by "row_key".
+  repeated Chunk chunks = 2;
+}
+
+// Request message for BigtableService.SampleRowKeys.
+message SampleRowKeysRequest {
+  // The unique name of the table from which to sample row keys.
+  // Values are of the form
+  // `projects/*/zones/*/clusters/*/tables/*` (see the service HTTP bindings).
+  string table_name = 1;
+}
+
+// Response message for BigtableService.SampleRowKeys.
+// Each message in the response stream carries a single sample row key.
+message SampleRowKeysResponse {
+  // Sorted streamed sequence of sample row keys in the table. The table might
+  // have contents before the first row key in the list and after the last one,
+  // but a key containing the empty string indicates "end of table" and will be
+  // the last response given, if present.
+  // Note that row keys in this list may not have ever been written to or read
+  // from, and users should therefore not make any assumptions about the row key
+  // structure that are specific to their use case.
+  bytes row_key = 1;
+
+  // Approximate total storage space used by all rows in the table which precede
+  // "row_key". Buffering the contents of all rows between two subsequent
+  // samples would require space roughly equal to the difference in their
+  // "offset_bytes" fields.
+  int64 offset_bytes = 2;
+}
+
+// Request message for BigtableService.MutateRow.
+message MutateRowRequest {
+  // The unique name of the table to which the mutation should be applied.
+  // Values are of the form
+  // `projects/*/zones/*/clusters/*/tables/*` (see the service HTTP bindings).
+  string table_name = 1;
+
+  // The key of the row to which the mutation should be applied.
+  bytes row_key = 2;
+
+  // Changes to be atomically applied to the specified row. Entries are applied
+  // in order, meaning that earlier mutations can be masked by later ones.
+  // Must contain at least one entry and at most 100000.
+  repeated Mutation mutations = 3;
+}
+
+// Request message for BigtableService.MutateRows.
+message MutateRowsRequest {
+  // A single row and the set of mutations to apply to it.
+  message Entry {
+    // The key of the row to which the `mutations` should be applied.
+    bytes row_key = 1;
+
+    // Changes to be atomically applied to the specified row. Mutations are
+    // applied in order, meaning that earlier mutations can be masked by
+    // later ones.
+    // At least one mutation must be specified.
+    repeated Mutation mutations = 2;
+  }
+
+  // The unique name of the table to which the mutations should be applied.
+  string table_name = 1;
+
+  // The row keys/mutations to be applied in bulk.
+  // Each entry is applied as an atomic mutation, but the entries may be
+  // applied in arbitrary order (even between entries for the same row).
+  // At least one entry must be specified, and in total the entries may
+  // contain at most 100000 mutations.
+  repeated Entry entries = 2;
+}
+
+// Response message for BigtableService.MutateRows.
+message MutateRowsResponse {
+  // The results for each Entry from the request, presented in the order
+  // in which the entries were originally given.
+  // Depending on how requests are batched during execution, it is possible
+  // for one Entry to fail due to an error with another Entry. In the event
+  // that this occurs, the same error will be reported for both entries.
+  // See google.rpc.Status for the error model used here.
+  repeated google.rpc.Status statuses = 1;
+}
+
+// Request message for BigtableService.CheckAndMutateRow.
+message CheckAndMutateRowRequest {
+  // The unique name of the table to which the conditional mutation should be
+  // applied.
+  string table_name = 1;
+
+  // The key of the row to which the conditional mutation should be applied.
+  bytes row_key = 2;
+
+  // The filter to be applied to the contents of the specified row. Depending
+  // on whether or not any results are yielded, either "true_mutations" or
+  // "false_mutations" will be executed. If unset, checks that the row contains
+  // any values at all.
+  RowFilter predicate_filter = 6;
+
+  // Changes to be atomically applied to the specified row if "predicate_filter"
+  // yields at least one cell when applied to "row_key". Entries are applied in
+  // order, meaning that earlier mutations can be masked by later ones.
+  // Must contain at least one entry if "false_mutations" is empty, and at most
+  // 100000.
+  repeated Mutation true_mutations = 4;
+
+  // Changes to be atomically applied to the specified row if "predicate_filter"
+  // does not yield any cells when applied to "row_key". Entries are applied in
+  // order, meaning that earlier mutations can be masked by later ones.
+  // Must contain at least one entry if "true_mutations" is empty, and at most
+  // 100000.
+  repeated Mutation false_mutations = 5;
+}
+
+// Response message for BigtableService.CheckAndMutateRow.
+message CheckAndMutateRowResponse {
+  // Whether or not the request's "predicate_filter" yielded any results for
+  // the specified row.
+  bool predicate_matched = 1;
+}
+
+// Request message for BigtableService.ReadModifyWriteRow.
+message ReadModifyWriteRowRequest {
+  // The unique name of the table to which the read/modify/write rules should be
+  // applied.
+  string table_name = 1;
+
+  // The key of the row to which the read/modify/write rules should be applied.
+  bytes row_key = 2;
+
+  // Rules specifying how the specified row's contents are to be transformed
+  // into writes. Entries are applied in order, meaning that earlier rules will
+  // affect the results of later ones.
+  repeated ReadModifyWriteRule rules = 3;
+}
diff --git a/third_party/googleapis/google/bigtable/v2/bigtable.proto b/third_party/googleapis/google/bigtable/v2/bigtable.proto
new file mode 100644
index 0000000000..5e8859ed52
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v2/bigtable.proto
@@ -0,0 +1,322 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v2;
+
+import "google/api/annotations.proto";
+import "google/bigtable/v2/data.proto";
+import "google/protobuf/wrappers.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableProto";
+option java_package = "com.google.bigtable.v2";
+
+
+// Service for reading from and writing to existing Bigtable tables.
+service Bigtable {
+  // Streams back the contents of all requested rows, optionally
+  // applying the same Reader filter to each. Depending on their size,
+  // rows and cells may be broken up across multiple responses, but
+  // atomicity of each row will still be preserved. See the
+  // ReadRowsResponse documentation for details.
+  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+    option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" };
+  }
+
+  // Returns a sample of row keys in the table. The returned row keys will
+  // delimit contiguous sections of the table of approximately equal size,
+  // which can be used to break up the data for distributed tasks like
+  // mapreduces. (Server-streaming RPC; one sample key per response.)
+  rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
+    option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" };
+  }
+
+  // Mutates a row atomically. Cells already present in the row are left
+  // unchanged unless explicitly changed by `mutation`.
+  rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) {
+    option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" };
+  }
+
+  // Mutates multiple rows in a batch. Each individual row is mutated
+  // atomically as in MutateRow, but the entire batch is not executed
+  // atomically. Per-entry results are streamed back in MutateRowsResponse.
+  rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) {
+    option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" };
+  }
+
+  // Mutates a row atomically based on the output of a predicate Reader filter.
+  rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
+    option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" };
+  }
+
+  // Modifies a row atomically. The method reads the latest existing timestamp
+  // and value from the specified columns and writes a new entry based on
+  // pre-defined read/modify/write rules. The new value for the timestamp is the
+  // greater of the existing timestamp or the current server time. The method
+  // returns the new contents of all modified cells.
+  rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) {
+    option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" };
+  }
+}
+
+// Request message for Bigtable.ReadRows.
+// Results are streamed back as ReadRowsResponse messages; see that type for
+// how row contents are chunked and reassembled.
+message ReadRowsRequest {
+  // The unique name of the table from which to read.
+  // Values are of the form
+  // `projects/<project>/instances/<instance>/tables/<table>`.
+  string table_name = 1;
+
+  // The row keys and/or ranges to read. If not specified, reads from all rows.
+  RowSet rows = 2;
+
+  // The filter to apply to the contents of the specified row(s). If unset,
+  // reads the entirety of each row.
+  RowFilter filter = 3;
+
+  // The read will terminate after committing to N rows' worth of results. The
+  // default (zero) is to return all results.
+  int64 rows_limit = 4;
+}
+
+// Response message for Bigtable.ReadRows.
+message ReadRowsResponse {
+  // Specifies a piece of a row's contents returned as part of the read
+  // response stream.
+  message CellChunk {
+    // The row key for this chunk of data. If the row key is empty,
+    // this CellChunk is a continuation of the same row as the previous
+    // CellChunk in the response stream, even if that CellChunk was in a
+    // previous ReadRowsResponse message.
+    bytes row_key = 1;
+
+    // The column family name for this chunk of data. If this message
+    // is not present this CellChunk is a continuation of the same column
+    // family as the previous CellChunk. The empty string can occur as a
+    // column family name in a response so clients must check
+    // explicitly for the presence of this message, not just for
+    // `family_name.value` being non-empty.
+    google.protobuf.StringValue family_name = 2;
+
+    // The column qualifier for this chunk of data. If this message
+    // is not present, this CellChunk is a continuation of the same column
+    // as the previous CellChunk. Column qualifiers may be empty so
+    // clients must check for the presence of this message, not just
+    // for `qualifier.value` being non-empty.
+    google.protobuf.BytesValue qualifier = 3;
+
+    // The cell's stored timestamp, which also uniquely identifies it
+    // within its column. Values are always expressed in
+    // microseconds, but individual tables may set a coarser
+    // granularity to further restrict the allowed values. For
+    // example, a table which specifies millisecond granularity will
+    // only allow values of `timestamp_micros` which are multiples of
+    // 1000. Timestamps are only set in the first CellChunk per cell
+    // (for cells split into multiple chunks).
+    int64 timestamp_micros = 4;
+
+    // Labels applied to the cell by a
+    // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
+    // on the first CellChunk per cell.
+    repeated string labels = 5;
+
+    // The value stored in the cell. Cell values can be split across
+    // multiple CellChunks. In that case only the value field will be
+    // set in CellChunks after the first: the timestamp and labels
+    // will only be present in the first CellChunk, even if the first
+    // CellChunk came in a previous ReadRowsResponse.
+    bytes value = 6;
+
+    // If this CellChunk is part of a chunked cell value and this is
+    // not the final chunk of that cell, value_size will be set to the
+    // total length of the cell value. The client can use this size
+    // to pre-allocate memory to hold the full cell value.
+    int32 value_size = 7;
+
+    // Signals how the client should treat previously buffered chunks for
+    // this row. At most one is set, per proto3 `oneof` semantics.
+    oneof row_status {
+      // Indicates that the client should drop all previous chunks for
+      // `row_key`, as it will be re-read from the beginning.
+      bool reset_row = 8;
+
+      // Indicates that the client can safely process all previous chunks for
+      // `row_key`, as its data has been fully read.
+      bool commit_row = 9;
+    }
+  }
+
+  // The chunks making up this portion of the streamed read; see CellChunk
+  // for the reassembly rules.
+  repeated CellChunk chunks = 1;
+
+  // Optionally the server might return the row key of the last row it
+  // has scanned. The client can use this to construct a more
+  // efficient retry request if needed: any row keys or portions of
+  // ranges less than this row key can be dropped from the request.
+  // This is primarily useful for cases where the server has read a
+  // lot of data that was filtered out since the last committed row
+  // key, allowing the client to skip that work on a retry.
+  bytes last_scanned_row_key = 2;
+}
+
+// Request message for Bigtable.SampleRowKeys.
+// See SampleRowKeysResponse for how the samples are returned.
+message SampleRowKeysRequest {
+  // The unique name of the table from which to sample row keys.
+  // Values are of the form
+  // `projects/<project>/instances/<instance>/tables/<table>`.
+  string table_name = 1;
+}
+
+// Response message for Bigtable.SampleRowKeys.
+// Each message in the response stream carries a single sample row key.
+message SampleRowKeysResponse {
+  // Sorted streamed sequence of sample row keys in the table. The table might
+  // have contents before the first row key in the list and after the last one,
+  // but a key containing the empty string indicates "end of table" and will be
+  // the last response given, if present.
+  // Note that row keys in this list may not have ever been written to or read
+  // from, and users should therefore not make any assumptions about the row key
+  // structure that are specific to their use case.
+  bytes row_key = 1;
+
+  // Approximate total storage space used by all rows in the table which precede
+  // `row_key`. Buffering the contents of all rows between two subsequent
+  // samples would require space roughly equal to the difference in their
+  // `offset_bytes` fields.
+  int64 offset_bytes = 2;
+}
+
+// Request message for Bigtable.MutateRow.
+message MutateRowRequest {
+  // The unique name of the table to which the mutation should be applied.
+  // Values are of the form
+  // `projects/<project>/instances/<instance>/tables/<table>`.
+  string table_name = 1;
+
+  // The key of the row to which the mutation should be applied.
+  bytes row_key = 2;
+
+  // Changes to be atomically applied to the specified row. Entries are applied
+  // in order, meaning that earlier mutations can be masked by later ones.
+  // Must contain at least one entry and at most 100000.
+  repeated Mutation mutations = 3;
+}
+
+// Response message for Bigtable.MutateRow.
+message MutateRowResponse {
+  // Intentionally empty: a dedicated response type allows fields to be
+  // added in later revisions without changing the RPC signature.
+}
+
+// Request message for Bigtable.MutateRows.
+message MutateRowsRequest {
+  // A single row and the set of mutations to apply to it.
+  message Entry {
+    // The key of the row to which the `mutations` should be applied.
+    bytes row_key = 1;
+
+    // Changes to be atomically applied to the specified row. Mutations are
+    // applied in order, meaning that earlier mutations can be masked by
+    // later ones.
+    // You must specify at least one mutation.
+    repeated Mutation mutations = 2;
+  }
+
+  // The unique name of the table to which the mutations should be applied.
+  string table_name = 1;
+
+  // The row keys and corresponding mutations to be applied in bulk.
+  // Each entry is applied as an atomic mutation, but the entries may be
+  // applied in arbitrary order (even between entries for the same row).
+  // At least one entry must be specified, and in total the entries can
+  // contain at most 100000 mutations.
+  repeated Entry entries = 2;
+}
+
+// Response message for Bigtable.MutateRows.
+message MutateRowsResponse {
+  // The result of applying one Entry from the request.
+  message Entry {
+    // The index into the original request's `entries` list of the Entry
+    // for which a result is being reported.
+    int64 index = 1;
+
+    // The result of the request Entry identified by `index`.
+    // Depending on how requests are batched during execution, it is possible
+    // for one Entry to fail due to an error with another Entry. In the event
+    // that this occurs, the same error will be reported for both entries.
+    google.rpc.Status status = 2;
+  }
+
+  // One or more results for Entries from the batch request.
+  repeated Entry entries = 1;
+}
+
+// Request message for Bigtable.CheckAndMutateRow.
+// At least one of `true_mutations` or `false_mutations` must be non-empty.
+message CheckAndMutateRowRequest {
+  // The unique name of the table to which the conditional mutation should be
+  // applied.
+  // Values are of the form
+  // `projects/<project>/instances/<instance>/tables/<table>`.
+  string table_name = 1;
+
+  // The key of the row to which the conditional mutation should be applied.
+  bytes row_key = 2;
+
+  // The filter to be applied to the contents of the specified row. Depending
+  // on whether or not any results are yielded, either `true_mutations` or
+  // `false_mutations` will be executed. If unset, checks that the row contains
+  // any values at all.
+  RowFilter predicate_filter = 6;
+
+  // Changes to be atomically applied to the specified row if `predicate_filter`
+  // yields at least one cell when applied to `row_key`. Entries are applied in
+  // order, meaning that earlier mutations can be masked by later ones.
+  // Must contain at least one entry if `false_mutations` is empty, and at most
+  // 100000.
+  repeated Mutation true_mutations = 4;
+
+  // Changes to be atomically applied to the specified row if `predicate_filter`
+  // does not yield any cells when applied to `row_key`. Entries are applied in
+  // order, meaning that earlier mutations can be masked by later ones.
+  // Must contain at least one entry if `true_mutations` is empty, and at most
+  // 100000.
+  repeated Mutation false_mutations = 5;
+}
+
+// Response message for Bigtable.CheckAndMutateRow.
+message CheckAndMutateRowResponse {
+  // Whether or not the request's `predicate_filter` yielded any results for
+  // the specified row. Reflects the check described on
+  // CheckAndMutateRowRequest.predicate_filter.
+  bool predicate_matched = 1;
+}
+
+// Request message for Bigtable.ReadModifyWriteRow.
+// All rules in a single request are applied atomically to the row.
+message ReadModifyWriteRowRequest {
+  // The unique name of the table to which the read/modify/write rules should be
+  // applied.
+  // Values are of the form
+  // `projects/<project>/instances/<instance>/tables/<table>`.
+  string table_name = 1;
+
+  // The key of the row to which the read/modify/write rules should be applied.
+  bytes row_key = 2;
+
+  // Rules specifying how the specified row's contents are to be transformed
+  // into writes. Entries are applied in order, meaning that earlier rules will
+  // affect the results of later ones.
+  repeated ReadModifyWriteRule rules = 3;
+}
+
+// Response message for Bigtable.ReadModifyWriteRow.
+message ReadModifyWriteRowResponse {
+  // A Row containing the new contents of all cells modified by the request.
+  // Cells not touched by the request's rules are not included.
+  Row row = 1;
+}
diff --git a/third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml b/third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml
new file mode 100644
index 0000000000..24a067fd5e
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v2/bigtable_gapic.yaml
@@ -0,0 +1,137 @@
+type: com.google.api.codegen.ConfigProto
+language_settings:
+ java:
+ package_name: com.google.cloud.bigtable.spi.v2
+ python:
+ package_name: google.cloud.gapic.bigtable.v2
+ go:
+ package_name: cloud.google.com/go/bigtable/apiv2
+ csharp:
+ package_name: Google.Bigtable.V2
+ ruby:
+ package_name: Google::Cloud::Bigtable::V2
+ php:
+ package_name: Google\Cloud\Bigtable\V2
+ nodejs:
+ package_name: bigtable.v2
+ domain_layer_location: google-cloud
+license_header:
+ copyright_file: copyright-google.txt
+ license_file: license-header-apache-2.0.txt
+interfaces:
+- name: google.bigtable.v2.Bigtable
+ collections:
+ - name_pattern: projects/{project}/instances/{instance}/tables/{table}
+ entity_name: table
+ retry_codes_def:
+ - name: idempotent
+ retry_codes:
+ - UNAVAILABLE
+ - DEADLINE_EXCEEDED
+ - name: non_idempotent
+ retry_codes:
+ - UNAVAILABLE
+ retry_params_def:
+ - name: default
+ initial_retry_delay_millis: 100
+ retry_delay_multiplier: 1.3
+ max_retry_delay_millis: 60000
+ initial_rpc_timeout_millis: 20000
+ rpc_timeout_multiplier: 1
+ max_rpc_timeout_millis: 20000
+ total_timeout_millis: 600000
+ methods:
+ - name: ReadRows
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ required_fields:
+ - table_name
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: SampleRowKeys
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ required_fields:
+ - table_name
+ request_object_method: false
+ retry_codes_name: idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: MutateRow
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - row_key
+ - mutations
+ required_fields:
+ - table_name
+ - row_key
+ - mutations
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: MutateRows
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - entries
+ required_fields:
+ - table_name
+ - entries
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: CheckAndMutateRow
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - row_key
+ - true_mutations
+ - false_mutations
+ # Note that one of {true_mutations,false_mutations} must be specified, but
+ # since they are not both required, we leave them as optional params.
+ required_fields:
+ - table_name
+ - row_key
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
+ - name: ReadModifyWriteRow
+ flattening:
+ groups:
+ - parameters:
+ - table_name
+ - row_key
+ - rules
+ required_fields:
+ - table_name
+ - row_key
+ - rules
+ request_object_method: true
+ retry_codes_name: non_idempotent
+ retry_params_name: default
+ field_name_patterns:
+ table_name: table
+ timeout_millis: 60000
diff --git a/third_party/googleapis/google/bigtable/v2/data.proto b/third_party/googleapis/google/bigtable/v2/data.proto
new file mode 100644
index 0000000000..b9eab6f7c1
--- /dev/null
+++ b/third_party/googleapis/google/bigtable/v2/data.proto
@@ -0,0 +1,533 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v2;
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable";
+option java_multiple_files = true;
+option java_outer_classname = "DataProto";
+option java_package = "com.google.bigtable.v2";
+
+
+// Specifies the complete (requested) contents of a single row of a table.
+// Rows which exceed 256MiB in size cannot be read in full.
+message Row {
+  // The unique key which identifies this row within its table. This is the
+  // same key that's used to identify the row in, for example, a
+  // MutateRowRequest. May contain any non-empty byte string up to 4KiB.
+  bytes key = 1;
+
+  // May be empty, but only if the entire row is empty.
+  // The mutual ordering of column families is not specified.
+  repeated Family families = 2;
+}
+
+// Specifies (some of) the contents of a single row/column family intersection
+// of a table.
+message Family {
+  // The unique key which identifies this family within its row. This is the
+  // same key that's used to identify the family in, for example, a RowFilter
+  // which sets its `family_name_regex_filter` field.
+  // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may
+  // produce cells in a sentinel family with an empty name.
+  // Must be no greater than 64 characters in length.
+  string name = 1;
+
+  // Must not be empty. Sorted in order of increasing `qualifier`.
+  repeated Column columns = 2;
+}
+
+// Specifies (some of) the contents of a single row/column intersection of a
+// table.
+message Column {
+  // The unique key which identifies this column within its family. This is
+  // the same key that's used to identify the column in, for example, a
+  // RowFilter which sets its `column_qualifier_regex_filter` field.
+  // May contain any byte string, including the empty string, up to 16kiB in
+  // length.
+  bytes qualifier = 1;
+
+  // Must not be empty. Sorted in order of decreasing `timestamp_micros`.
+  repeated Cell cells = 2;
+}
+
+// Specifies (some of) the contents of a single row/column/timestamp of a table.
+message Cell {
+  // The cell's stored timestamp, which also uniquely identifies this cell
+  // within its column.
+  // Values are always expressed in microseconds, but individual tables may set
+  // a coarser granularity to further restrict the allowed values. For
+  // example, a table which specifies millisecond granularity will only allow
+  // values of `timestamp_micros` which are multiples of 1000.
+  int64 timestamp_micros = 1;
+
+  // The value stored in the cell.
+  // May contain any byte string, including the empty string, up to 100MiB in
+  // length.
+  bytes value = 2;
+
+  // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter].
+  repeated string labels = 3;
+}
+
+// Specifies a contiguous range of rows.
+message RowRange {
+  // The row key at which to start the range.
+  // If neither field is set, interpreted as the empty string, inclusive.
+  oneof start_key {
+    // Used when giving an inclusive lower bound for the range.
+    bytes start_key_closed = 1;
+
+    // Used when giving an exclusive lower bound for the range.
+    bytes start_key_open = 2;
+  }
+
+  // The row key at which to end the range.
+  // If neither field is set, interpreted as the infinite row key, exclusive.
+  oneof end_key {
+    // Used when giving an exclusive upper bound for the range.
+    bytes end_key_open = 3;
+
+    // Used when giving an inclusive upper bound for the range.
+    bytes end_key_closed = 4;
+  }
+}
+
+// Specifies a non-contiguous set of rows.
+message RowSet {
+  // Single rows included in the set, identified by their keys.
+  repeated bytes row_keys = 1;
+
+  // Contiguous row ranges included in the set.
+  repeated RowRange row_ranges = 2;
+}
+
+// Specifies a contiguous range of columns within a single column family.
+// The range spans from <column_family>:<start_qualifier> to
+// <column_family>:<end_qualifier>, where both bounds can be either
+// inclusive or exclusive.
+message ColumnRange {
+  // The name of the column family within which this range falls.
+  string family_name = 1;
+
+  // The column qualifier at which to start the range (within `column_family`).
+  // If neither field is set, interpreted as the empty string, inclusive.
+  oneof start_qualifier {
+    // Used when giving an inclusive lower bound for the range.
+    bytes start_qualifier_closed = 2;
+
+    // Used when giving an exclusive lower bound for the range.
+    bytes start_qualifier_open = 3;
+  }
+
+  // The column qualifier at which to end the range (within `column_family`).
+  // If neither field is set, interpreted as the infinite string, exclusive.
+  oneof end_qualifier {
+    // Used when giving an inclusive upper bound for the range.
+    bytes end_qualifier_closed = 4;
+
+    // Used when giving an exclusive upper bound for the range.
+    bytes end_qualifier_open = 5;
+  }
+}
+
+// Specifies a contiguous range of microsecond timestamps.
+message TimestampRange {
+  // Inclusive lower bound. If left empty, interpreted as 0.
+  int64 start_timestamp_micros = 1;
+
+  // Exclusive upper bound. If left empty, interpreted as infinity.
+  int64 end_timestamp_micros = 2;
+}
+
+// Specifies a contiguous range of raw byte values.
+message ValueRange {
+  // The value at which to start the range.
+  // If neither field is set, interpreted as the empty string, inclusive.
+  oneof start_value {
+    // Used when giving an inclusive lower bound for the range.
+    bytes start_value_closed = 1;
+
+    // Used when giving an exclusive lower bound for the range.
+    bytes start_value_open = 2;
+  }
+
+  // The value at which to end the range.
+  // If neither field is set, interpreted as the infinite string, exclusive.
+  oneof end_value {
+    // Used when giving an inclusive upper bound for the range.
+    bytes end_value_closed = 3;
+
+    // Used when giving an exclusive upper bound for the range.
+    bytes end_value_open = 4;
+  }
+}
+
+// Takes a row as input and produces an alternate view of the row based on
+// specified rules. For example, a RowFilter might trim down a row to include
+// just the cells from columns matching a given regular expression, or might
+// return all the cells of a row but not their values. More complicated filters
+// can be composed out of these components to express requests such as, "within
+// every column of a particular family, give just the two most recent cells
+// which are older than timestamp X."
+//
+// There are two broad categories of RowFilters (true filters and transformers),
+// as well as two ways to compose simple filters into more complex ones
+// (chains and interleaves). They work as follows:
+//
+// * True filters alter the input row by excluding some of its cells wholesale
+// from the output row. An example of a true filter is the `value_regex_filter`,
+// which excludes cells whose values don't match the specified pattern. All
+// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
+// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
+// important point to keep in mind is that `RE2(.)` is equivalent by default to
+// `RE2([^\n])`, meaning that it does not match newlines. When attempting to
+// match an arbitrary byte, you should therefore use the escape sequence `\C`,
+// which may need to be further escaped as `\\C` in your client language.
+//
+// * Transformers alter the input row by changing the values of some of its
+// cells in the output, without excluding them completely. Currently, the only
+// supported transformer is the `strip_value_transformer`, which replaces every
+// cell's value with the empty string.
+//
+// * Chains and interleaves are described in more detail in the
+// RowFilter.Chain and RowFilter.Interleave documentation.
+//
+// The total serialized size of a RowFilter message must not
+// exceed 4096 bytes, and RowFilters may not be nested within each other
+// (in Chains or Interleaves) to a depth of more than 20.
+message RowFilter {
+  // A RowFilter which sends rows through several RowFilters in sequence.
+  message Chain {
+    // The elements of "filters" are chained together to process the input row:
+    // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
+    // The full chain is executed atomically.
+    repeated RowFilter filters = 1;
+  }
+
+  // A RowFilter which sends each row to each of several component
+  // RowFilters and interleaves the results.
+  message Interleave {
+    // The elements of "filters" all process a copy of the input row, and the
+    // results are pooled, sorted, and combined into a single output row.
+    // If multiple cells are produced with the same column and timestamp,
+    // they will all appear in the output row in an unspecified mutual order.
+    // Consider the following example, with three filters:
+    //
+    //                                  input row
+    //                                      |
+    //           -----------------------------------------------------
+    //           |                          |                         |
+    //          f(0)                       f(1)                      f(2)
+    //           |                          |                         |
+    //    1: foo,bar,10,x             foo,bar,10,z              far,bar,7,a
+    //    2: foo,blah,11,z            far,blah,5,x              far,blah,5,x
+    //           |                          |                         |
+    //           -----------------------------------------------------
+    //                                      |
+    //    1:                      foo,bar,10,z   // could have switched with #2
+    //    2:                      foo,bar,10,x   // could have switched with #1
+    //    3:                      foo,blah,11,z
+    //    4:                      far,bar,7,a
+    //    5:                      far,blah,5,x   // identical to #6
+    //    6:                      far,blah,5,x   // identical to #5
+    //
+    // All interleaved filters are executed atomically.
+    repeated RowFilter filters = 1;
+  }
+
+  // A RowFilter which evaluates one of two possible RowFilters, depending on
+  // whether or not a predicate RowFilter outputs any cells from the input row.
+  //
+  // IMPORTANT NOTE: The predicate filter does not execute atomically with the
+  // true and false filters, which may lead to inconsistent or unexpected
+  // results. Additionally, Condition filters have poor performance, especially
+  // when filters are set for the false condition.
+  message Condition {
+    // If `predicate_filter` outputs any cells, then `true_filter` will be
+    // evaluated on the input row. Otherwise, `false_filter` will be evaluated.
+    RowFilter predicate_filter = 1;
+
+    // The filter to apply to the input row if `predicate_filter` returns any
+    // results. If not provided, no results will be returned in the true case.
+    RowFilter true_filter = 2;
+
+    // The filter to apply to the input row if `predicate_filter` does not
+    // return any results. If not provided, no results will be returned in the
+    // false case.
+    RowFilter false_filter = 3;
+  }
+
+  // Which of the possible RowFilter types to apply. If none are set, this
+  // RowFilter returns all cells in the input row.
+  oneof filter {
+    // Applies several RowFilters to the data in sequence, progressively
+    // narrowing the results.
+    Chain chain = 1;
+
+    // Applies several RowFilters to the data in parallel and combines the
+    // results.
+    Interleave interleave = 2;
+
+    // Applies one of two possible RowFilters to the data based on the output of
+    // a predicate RowFilter.
+    Condition condition = 3;
+
+    // ADVANCED USE ONLY.
+    // Hook for introspection into the RowFilter. Outputs all cells directly to
+    // the output of the read rather than to any parent filter. Consider the
+    // following example:
+    //
+    //     Chain(
+    //       FamilyRegex("A"),
+    //       Interleave(
+    //         All(),
+    //         Chain(Label("foo"), Sink())
+    //       ),
+    //       QualifierRegex("B")
+    //     )
+    //
+    //                         A,A,1,w
+    //                         A,B,2,x
+    //                         B,B,4,z
+    //                            |
+    //                     FamilyRegex("A")
+    //                            |
+    //                         A,A,1,w
+    //                         A,B,2,x
+    //                            |
+    //               +------------+-------------+
+    //               |                          |
+    //             All()                    Label(foo)
+    //               |                          |
+    //            A,A,1,w              A,A,1,w,labels:[foo]
+    //            A,B,2,x              A,B,2,x,labels:[foo]
+    //               |                          |
+    //               |                        Sink() --------------+
+    //               |                          |                  |
+    //               +------------+      x------+          A,A,1,w,labels:[foo]
+    //                            |                        A,B,2,x,labels:[foo]
+    //                         A,A,1,w                             |
+    //                         A,B,2,x                             |
+    //                            |                                |
+    //                    QualifierRegex("B")                      |
+    //                            |                                |
+    //                         A,B,2,x                             |
+    //                            |                                |
+    //               +--------------------------------+
+    //                            |
+    //                     A,A,1,w,labels:[foo]
+    //                     A,B,2,x,labels:[foo]  // could be switched
+    //                     A,B,2,x               // could be switched
+    //
+    // Despite being excluded by the qualifier filter, a copy of every cell
+    // that reaches the sink is present in the final result.
+    //
+    // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave],
+    // duplicate cells are possible, and appear in an unspecified mutual order.
+    // In this case we have a duplicate with column "A:B" and timestamp 2,
+    // because one copy passed through the all filter while the other was
+    // passed through the label and sink. Note that one copy has label "foo",
+    // while the other does not.
+    //
+    // Cannot be used within the `predicate_filter`, `true_filter`, or
+    // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition].
+    bool sink = 16;
+
+    // Matches all cells, regardless of input. Functionally equivalent to
+    // leaving `filter` unset, but included for completeness.
+    bool pass_all_filter = 17;
+
+    // Does not match any cells, regardless of input. Useful for temporarily
+    // disabling just part of a filter.
+    bool block_all_filter = 18;
+
+    // Matches only cells from rows whose keys satisfy the given RE2 regex. In
+    // other words, passes through the entire row when the key matches, and
+    // otherwise produces an empty row.
+    // Note that, since row keys can contain arbitrary bytes, the `\C` escape
+    // sequence must be used if a true wildcard is desired. The `.` character
+    // will not match the new line character `\n`, which may be present in a
+    // binary key.
+    bytes row_key_regex_filter = 4;
+
+    // Matches all cells from a row with probability p, and matches no cells
+    // from the row with probability 1-p.
+    double row_sample_filter = 14;
+
+    // Matches only cells from columns whose families satisfy the given RE2
+    // regex. For technical reasons, the regex must not contain the `:`
+    // character, even if it is not being used as a literal.
+    // Note that, since column families cannot contain the new line character
+    // `\n`, it is sufficient to use `.` as a full wildcard when matching
+    // column family names.
+    string family_name_regex_filter = 5;
+
+    // Matches only cells from columns whose qualifiers satisfy the given RE2
+    // regex.
+    // Note that, since column qualifiers can contain arbitrary bytes, the `\C`
+    // escape sequence must be used if a true wildcard is desired. The `.`
+    // character will not match the new line character `\n`, which may be
+    // present in a binary qualifier.
+    bytes column_qualifier_regex_filter = 6;
+
+    // Matches only cells from columns within the given range.
+    ColumnRange column_range_filter = 7;
+
+    // Matches only cells with timestamps within the given range.
+    TimestampRange timestamp_range_filter = 8;
+
+    // Matches only cells with values that satisfy the given regular expression.
+    // Note that, since cell values can contain arbitrary bytes, the `\C` escape
+    // sequence must be used if a true wildcard is desired. The `.` character
+    // will not match the new line character `\n`, which may be present in a
+    // binary value.
+    bytes value_regex_filter = 9;
+
+    // Matches only cells with values that fall within the given range.
+    ValueRange value_range_filter = 15;
+
+    // Skips the first N cells of each row, matching all subsequent cells.
+    // If duplicate cells are present, as is possible when using an Interleave,
+    // each copy of the cell is counted separately.
+    int32 cells_per_row_offset_filter = 10;
+
+    // Matches only the first N cells of each row.
+    // If duplicate cells are present, as is possible when using an Interleave,
+    // each copy of the cell is counted separately.
+    int32 cells_per_row_limit_filter = 11;
+
+    // Matches only the most recent N cells within each column. For example,
+    // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9,
+    // skip all earlier cells in `foo:bar`, and then begin matching again in
+    // column `foo:bar2`.
+    // If duplicate cells are present, as is possible when using an Interleave,
+    // each copy of the cell is counted separately.
+    int32 cells_per_column_limit_filter = 12;
+
+    // Replaces each cell's value with the empty string.
+    bool strip_value_transformer = 13;
+
+    // Applies the given label to all cells in the output row. This allows
+    // the client to determine which results were produced from which part of
+    // the filter.
+    //
+    // Values must be at most 15 characters in length, and match the RE2
+    // pattern `[a-z0-9\\-]+`
+    //
+    // Due to a technical limitation, it is not currently possible to apply
+    // multiple labels to a cell. As a result, a Chain may have no more than
+    // one sub-filter which contains an `apply_label_transformer`. It is okay
+    // for an Interleave to contain multiple `apply_label_transformers`, as
+    // they will be applied to separate copies of the input. This may be
+    // relaxed in the future.
+    string apply_label_transformer = 19;
+  }
+}
+
+// Specifies a particular change to be made to the contents of a row.
+message Mutation {
+  // A Mutation which sets the value of the specified cell.
+  message SetCell {
+    // The name of the family into which new data should be written.
+    // Must match `[-_.a-zA-Z0-9]+`
+    string family_name = 1;
+
+    // The qualifier of the column into which new data should be written.
+    // Can be any byte string, including the empty string.
+    bytes column_qualifier = 2;
+
+    // The timestamp of the cell into which new data should be written.
+    // Use -1 for current Bigtable server time.
+    // Otherwise, the client should set this value itself, noting that the
+    // default value is a timestamp of zero if the field is left unspecified.
+    // Values must match the granularity of the table (e.g. micros, millis).
+    int64 timestamp_micros = 3;
+
+    // The value to be written into the specified cell.
+    bytes value = 4;
+  }
+
+  // A Mutation which deletes cells from the specified column, optionally
+  // restricting the deletions to a given timestamp range.
+  message DeleteFromColumn {
+    // The name of the family from which cells should be deleted.
+    // Must match `[-_.a-zA-Z0-9]+`
+    string family_name = 1;
+
+    // The qualifier of the column from which cells should be deleted.
+    // Can be any byte string, including the empty string.
+    bytes column_qualifier = 2;
+
+    // The range of timestamps within which cells should be deleted.
+    TimestampRange time_range = 3;
+  }
+
+  // A Mutation which deletes all cells from the specified column family.
+  message DeleteFromFamily {
+    // The name of the family from which cells should be deleted.
+    // Must match `[-_.a-zA-Z0-9]+`
+    string family_name = 1;
+  }
+
+  // A Mutation which deletes all cells from the containing row.
+  message DeleteFromRow {
+
+  }
+
+  // Which of the possible Mutation types to apply.
+  oneof mutation {
+    // Sets a cell's value.
+    SetCell set_cell = 1;
+
+    // Deletes cells from a column.
+    DeleteFromColumn delete_from_column = 2;
+
+    // Deletes cells from a column family.
+    DeleteFromFamily delete_from_family = 3;
+
+    // Deletes cells from the entire row.
+    DeleteFromRow delete_from_row = 4;
+  }
+}
+
+// Specifies an atomic read/modify/write operation on the latest value of the
+// specified column.
+message ReadModifyWriteRule {
+  // The name of the family to which the read/modify/write should be applied.
+  // Must match `[-_.a-zA-Z0-9]+`
+  string family_name = 1;
+
+  // The qualifier of the column to which the read/modify/write should be
+  // applied.
+  // Can be any byte string, including the empty string.
+  bytes column_qualifier = 2;
+
+  // The rule used to determine the column's new latest value from its current
+  // latest value.
+  oneof rule {
+    // Rule specifying that `append_value` be appended to the existing value.
+    // If the targeted cell is unset, it will be treated as containing the
+    // empty string.
+    bytes append_value = 3;
+
+    // Rule specifying that `increment_amount` be added to the existing value.
+    // If the targeted cell is unset, it will be treated as containing a zero.
+    // Otherwise, the targeted cell must contain an 8-byte value (interpreted
+    // as a 64-bit big-endian signed integer), or the entire request will fail.
+    int64 increment_amount = 4;
+  }
+}