author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-07-02 11:38:06 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-07-02 11:40:56 -0700
commit     2e764644d6a9fe4a21e4c35e4a25677411e8e101 (patch)
tree       1d6b6419b40255aab0c819867ff5da841b7cc829 /tensorflow/contrib/bigtable
parent     7b5f6a4e37c030c6f3aeb58924a76072ac3f784c (diff)
Update to latest version of Cloud Bigtable C++ Client.
PiperOrigin-RevId: 202986386
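The patch is mechanical: the Cloud Bigtable C++ client moved its public API from the top-level ::bigtable namespace to ::google::cloud::bigtable, so every reference in these kernels (DataClient, Filter, RowRange, BulkMutation, and friends) is requalified. A minimal sketch of the before/after spelling follows; the header path is assumed from the google-cloud-cpp layout of that era, and the wrapper function is illustrative, not part of the diff:

#include <memory>
#include <string>
#include "google/cloud/bigtable/data_client.h"

// Hypothetical helper. The old spelling was
//   bigtable::CreateDefaultDataClient(project_id, instance_id,
//                                     bigtable::ClientOptions());
// The new spelling, used throughout this diff, qualifies everything
// under google::cloud.
std::shared_ptr<google::cloud::bigtable::DataClient> MakeClient(
    const std::string& project_id, const std::string& instance_id) {
  return google::cloud::bigtable::CreateDefaultDataClient(
      project_id, instance_id, google::cloud::bigtable::ClientOptions());
}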
Diffstat (limited to 'tensorflow/contrib/bigtable')
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc                        | 46
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_lib.h                             | 30
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc              | 15
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc          | 15
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc           | 17
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc                | 29
-rw-r--r--  tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h        |  2
-rw-r--r--  tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc    | 33
-rw-r--r--  tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc  | 81
9 files changed, 146 insertions, 122 deletions
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
index 0c81951d56..8a7309e870 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
@@ -51,18 +51,19 @@ class BigtableClientOp : public OpKernel {
OP_REQUIRES_OK(ctx, cinfo_.Init(mgr, def()));
BigtableClientResource* resource;
OP_REQUIRES_OK(
- ctx, mgr->LookupOrCreate<BigtableClientResource>(
- cinfo_.container(), cinfo_.name(), &resource,
- [this, ctx](BigtableClientResource** ret)
- EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- std::shared_ptr<bigtable::DataClient> client =
- bigtable::CreateDefaultDataClient(
- project_id_, instance_id_,
- bigtable::ClientOptions());
- *ret = new BigtableClientResource(
- project_id_, instance_id_, std::move(client));
- return Status::OK();
- }));
+ ctx,
+ mgr->LookupOrCreate<BigtableClientResource>(
+ cinfo_.container(), cinfo_.name(), &resource,
+ [this, ctx](
+ BigtableClientResource** ret) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ std::shared_ptr<google::cloud::bigtable::DataClient> client =
+ google::cloud::bigtable::CreateDefaultDataClient(
+ project_id_, instance_id_,
+ google::cloud::bigtable::ClientOptions());
+ *ret = new BigtableClientResource(project_id_, instance_id_,
+ std::move(client));
+ return Status::OK();
+ }));
core::ScopedUnref resource_cleanup(resource);
initialized_ = true;
}
@@ -210,7 +211,7 @@ class ToBigtableOp : public AsyncOpKernel {
components.reserve(dataset->output_dtypes().size());
bool end_of_sequence = false;
do {
- ::bigtable::BulkMutation mutation;
+ ::google::cloud::bigtable::BulkMutation mutation;
// TODO(saeta): Make # of mutations configurable.
for (uint64 i = 0; i < 100 && !end_of_sequence; ++i) {
OP_REQUIRES_OK_ASYNC(
@@ -226,7 +227,7 @@ class ToBigtableOp : public AsyncOpKernel {
components.clear();
}
grpc::Status mutation_status;
- std::vector<::bigtable::FailedMutation> failures =
+ std::vector<::google::cloud::bigtable::FailedMutation> failures =
resource->table().BulkApply(std::move(mutation), mutation_status);
if (!failures.empty()) {
for (const auto& failure : failures) {
@@ -267,24 +268,23 @@ class ToBigtableOp : public AsyncOpKernel {
return clean;
}
- Status CreateMutation(std::vector<Tensor> tensors,
- const std::vector<string>& column_families,
- const std::vector<string>& columns,
- std::chrono::milliseconds timestamp,
- ::bigtable::BulkMutation* bulk_mutation) {
+ Status CreateMutation(
+ std::vector<Tensor> tensors, const std::vector<string>& column_families,
+ const std::vector<string>& columns, std::chrono::milliseconds timestamp,
+ ::google::cloud::bigtable::BulkMutation* bulk_mutation) {
if (tensors.size() != column_families.size() + 1) {
return errors::InvalidArgument(
"Iterator produced a set of Tensors shorter than expected");
}
- ::bigtable::SingleRowMutation mutation(
+ ::google::cloud::bigtable::SingleRowMutation mutation(
std::move(tensors[0].scalar<string>()()));
for (size_t i = 1; i < tensors.size(); ++i) {
if (!TensorShapeUtils::IsScalar(tensors[i].shape())) {
return errors::Internal("Output tensor ", i, " was not a scalar");
}
- mutation.emplace_back(
- ::bigtable::SetCell(column_families[i - 1], columns[i - 1], timestamp,
- std::move(tensors[i].scalar<string>()())));
+ mutation.emplace_back(::google::cloud::bigtable::SetCell(
+ column_families[i - 1], columns[i - 1], timestamp,
+ std::move(tensors[i].scalar<string>()())));
}
bulk_mutation->emplace_back(std::move(mutation));
return Status::OK();
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
index 54303cdc5e..12d8256dea 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
@@ -31,13 +31,16 @@ string RegexFromStringSet(const std::vector<string>& strs);
class BigtableClientResource : public ResourceBase {
public:
- BigtableClientResource(string project_id, string instance_id,
- std::shared_ptr<bigtable::DataClient> client)
+ BigtableClientResource(
+ string project_id, string instance_id,
+ std::shared_ptr<google::cloud::bigtable::DataClient> client)
: project_id_(std::move(project_id)),
instance_id_(std::move(instance_id)),
client_(std::move(client)) {}
- std::shared_ptr<bigtable::DataClient> get_client() { return client_; }
+ std::shared_ptr<google::cloud::bigtable::DataClient> get_client() {
+ return client_;
+ }
string DebugString() override {
return strings::StrCat("BigtableClientResource(project_id: ", project_id_,
@@ -47,7 +50,7 @@ class BigtableClientResource : public ResourceBase {
private:
const string project_id_;
const string instance_id_;
- std::shared_ptr<bigtable::DataClient> client_;
+ std::shared_ptr<google::cloud::bigtable::DataClient> client_;
};
class BigtableTableResource : public ResourceBase {
@@ -61,7 +64,7 @@ class BigtableTableResource : public ResourceBase {
~BigtableTableResource() override { client_->Unref(); }
- ::bigtable::noex::Table& table() { return table_; }
+ ::google::cloud::bigtable::noex::Table& table() { return table_; }
string DebugString() override {
return strings::StrCat(
@@ -72,7 +75,7 @@ class BigtableTableResource : public ResourceBase {
private:
BigtableClientResource* client_; // Owns one ref.
const string table_name_;
- ::bigtable::noex::Table table_;
+ ::google::cloud::bigtable::noex::Table table_;
};
// BigtableReaderDatasetIterator is an abstract class for iterators from
@@ -98,7 +101,7 @@ class BigtableReaderDatasetIterator : public DatasetIterator<Dataset> {
return GrpcStatusToTfStatus(status);
}
*end_of_sequence = false;
- bigtable::Row& row = *iterator_;
+ google::cloud::bigtable::Row& row = *iterator_;
Status s = ParseRow(ctx, row, out_tensors);
// Ensure we always advance.
++iterator_;
@@ -106,9 +109,10 @@ class BigtableReaderDatasetIterator : public DatasetIterator<Dataset> {
}
protected:
- virtual ::bigtable::RowRange MakeRowRange() = 0;
- virtual ::bigtable::Filter MakeFilter() = 0;
- virtual Status ParseRow(IteratorContext* ctx, const ::bigtable::Row& row,
+ virtual ::google::cloud::bigtable::RowRange MakeRowRange() = 0;
+ virtual ::google::cloud::bigtable::Filter MakeFilter() = 0;
+ virtual Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
std::vector<Tensor>* out_tensors) = 0;
private:
@@ -122,15 +126,15 @@ class BigtableReaderDatasetIterator : public DatasetIterator<Dataset> {
// Note: the this in `this->dataset()` below is necessary due to namespace
// name conflicts.
- reader_.reset(new ::bigtable::RowReader(
+ reader_.reset(new ::google::cloud::bigtable::RowReader(
this->dataset()->table()->table().ReadRows(rows, filter)));
iterator_ = reader_->begin();
return Status::OK();
}
mutex mu_;
- std::unique_ptr<::bigtable::RowReader> reader_ GUARDED_BY(mu_);
- ::bigtable::RowReader::iterator iterator_ GUARDED_BY(mu_);
+ std::unique_ptr<::google::cloud::bigtable::RowReader> reader_ GUARDED_BY(mu_);
+ ::google::cloud::bigtable::RowReader::iterator iterator_ GUARDED_BY(mu_);
};
} // namespace tensorflow
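The fully qualified names account for most of the line growth in these hunks. This patch spells the namespace out at every call site; a namespace alias would be the usual way to keep such sites short. A sketch, not part of the diff (the header name is assumed from the library's layout):

#include "google/cloud/bigtable/filters.h"

namespace cbt = ::google::cloud::bigtable;  // alias; not used by this patch

// The same row-keys-only filter the key-dataset iterators build,
// written against the alias instead of the full namespace.
cbt::Filter RowKeysOnlyFilter() {
  return cbt::Filter::Chain(cbt::Filter::CellsRowLimit(1),
                            cbt::Filter::StripValueTransformer());
}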
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
index 4b6d55a2d3..9e49fa35db 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
@@ -97,16 +97,16 @@ class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
}
private:
- static ::bigtable::Filter MakeFilter(
+ static ::google::cloud::bigtable::Filter MakeFilter(
const std::vector<string>& column_families,
const std::vector<string>& columns) {
string column_family_regex = RegexFromStringSet(column_families);
string column_regex = RegexFromStringSet(columns);
- return ::bigtable::Filter::Chain(
- ::bigtable::Filter::Latest(1),
- ::bigtable::Filter::FamilyRegex(column_family_regex),
- ::bigtable::Filter::ColumnRegex(column_regex));
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::FamilyRegex(column_family_regex),
+ ::google::cloud::bigtable::Filter::ColumnRegex(column_regex));
}
class Iterator : public DatasetIterator<Dataset> {
@@ -163,7 +163,8 @@ class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
}
private:
- Status ParseRow(IteratorContext* ctx, const ::bigtable::Row& row,
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
std::vector<Tensor>* out_tensors) {
out_tensors->reserve(dataset()->columns_.size() + 1);
Tensor row_key_tensor(ctx->allocator({}), DT_STRING, {});
@@ -209,7 +210,7 @@ class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
const std::vector<string> columns_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
- const ::bigtable::Filter filter_;
+ const ::google::cloud::bigtable::Filter filter_;
};
};
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
index 3d5c3cfdaa..e960719614 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
@@ -74,15 +74,16 @@ class BigtablePrefixKeyDatasetOp : public DatasetOpKernel {
explicit Iterator(const Params& params)
: BigtableReaderDatasetIterator<Dataset>(params) {}
- ::bigtable::RowRange MakeRowRange() override {
- return ::bigtable::RowRange::Prefix(dataset()->prefix_);
+ ::google::cloud::bigtable::RowRange MakeRowRange() override {
+ return ::google::cloud::bigtable::RowRange::Prefix(dataset()->prefix_);
}
- ::bigtable::Filter MakeFilter() override {
- return ::bigtable::Filter::Chain(
- ::bigtable::Filter::CellsRowLimit(1),
- ::bigtable::Filter::StripValueTransformer());
+ ::google::cloud::bigtable::Filter MakeFilter() override {
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::CellsRowLimit(1),
+ ::google::cloud::bigtable::Filter::StripValueTransformer());
}
- Status ParseRow(IteratorContext* ctx, const ::bigtable::Row& row,
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
std::vector<Tensor>* out_tensors) override {
Tensor output_tensor(ctx->allocator({}), DT_STRING, {});
output_tensor.scalar<string>()() = string(row.row_key());
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
index 7fa06052c5..96d3565d9b 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
@@ -81,16 +81,17 @@ class BigtableRangeKeyDatasetOp : public DatasetOpKernel {
explicit Iterator(const Params& params)
: BigtableReaderDatasetIterator<Dataset>(params) {}
- ::bigtable::RowRange MakeRowRange() override {
- return ::bigtable::RowRange::Range(dataset()->start_key_,
- dataset()->end_key_);
+ ::google::cloud::bigtable::RowRange MakeRowRange() override {
+ return ::google::cloud::bigtable::RowRange::Range(dataset()->start_key_,
+ dataset()->end_key_);
}
- ::bigtable::Filter MakeFilter() override {
- return ::bigtable::Filter::Chain(
- ::bigtable::Filter::CellsRowLimit(1),
- ::bigtable::Filter::StripValueTransformer());
+ ::google::cloud::bigtable::Filter MakeFilter() override {
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::CellsRowLimit(1),
+ ::google::cloud::bigtable::Filter::StripValueTransformer());
}
- Status ParseRow(IteratorContext* ctx, const ::bigtable::Row& row,
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
std::vector<Tensor>* out_tensors) override {
Tensor output_tensor(ctx->allocator({}), DT_STRING, {});
output_tensor.scalar<string>()() = string(row.row_key());
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
index 11b9bd2bdc..13cb868167 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
@@ -135,28 +135,33 @@ class BigtableScanDatasetOp : public DatasetOpKernel {
explicit Iterator(const Params& params)
: BigtableReaderDatasetIterator<Dataset>(params) {}
- ::bigtable::RowRange MakeRowRange() override {
+ ::google::cloud::bigtable::RowRange MakeRowRange() override {
if (!dataset()->prefix_.empty()) {
DCHECK(dataset()->start_key_.empty());
- return ::bigtable::RowRange::Prefix(dataset()->prefix_);
+ return ::google::cloud::bigtable::RowRange::Prefix(
+ dataset()->prefix_);
} else {
DCHECK(!dataset()->start_key_.empty())
<< "Both prefix and start_key were empty!";
- return ::bigtable::RowRange::Range(dataset()->start_key_,
- dataset()->end_key_);
+ return ::google::cloud::bigtable::RowRange::Range(
+ dataset()->start_key_, dataset()->end_key_);
}
}
- ::bigtable::Filter MakeFilter() override {
+ ::google::cloud::bigtable::Filter MakeFilter() override {
// TODO(saeta): Investigate optimal ordering here.
- return ::bigtable::Filter::Chain(
- ::bigtable::Filter::Latest(1),
- ::bigtable::Filter::FamilyRegex(dataset()->column_family_regex_),
- ::bigtable::Filter::ColumnRegex(dataset()->column_regex_),
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::FamilyRegex(
+ dataset()->column_family_regex_),
+ ::google::cloud::bigtable::Filter::ColumnRegex(
+ dataset()->column_regex_),
dataset()->probability_ != 1.0
- ? ::bigtable::Filter::RowSample(dataset()->probability_)
- : ::bigtable::Filter::PassAllFilter());
+ ? ::google::cloud::bigtable::Filter::RowSample(
+ dataset()->probability_)
+ : ::google::cloud::bigtable::Filter::PassAllFilter());
}
- Status ParseRow(IteratorContext* ctx, const ::bigtable::Row& row,
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
std::vector<Tensor>* out_tensors) override {
out_tensors->reserve(dataset()->columns_.size() + 1);
Tensor row_key_tensor(ctx->allocator({}), DT_STRING, {});
diff --git a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h
index dcce6a33a7..dac2b16a21 100644
--- a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h
+++ b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h
@@ -22,7 +22,7 @@ limitations under the License.
namespace tensorflow {
-class BigtableTestClient : public ::bigtable::DataClient {
+class BigtableTestClient : public ::google::cloud::bigtable::DataClient {
public:
std::string const& project_id() const override { return project_id_; }
std::string const& instance_id() const override { return instance_id_; }
diff --git a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc
index f9be9ec6e2..fa3e587b90 100644
--- a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc
@@ -41,22 +41,23 @@ class BigtableTestClientOp : public OpKernel {
ResourceMgr* mgr = ctx->resource_manager();
OP_REQUIRES_OK(ctx, cinfo_.Init(mgr, def()));
BigtableClientResource* resource;
- OP_REQUIRES_OK(ctx,
- mgr->LookupOrCreate<BigtableClientResource>(
- cinfo_.container(), cinfo_.name(), &resource,
- [this, ctx](BigtableClientResource** ret)
- EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- std::shared_ptr<bigtable::DataClient> client(
- new BigtableTestClient());
- // Note: must make explicit copies to sequence
- // them before the move of client.
- string project_id = client->project_id();
- string instance_id = client->instance_id();
- *ret = new BigtableClientResource(
- std::move(project_id),
- std::move(instance_id), std::move(client));
- return Status::OK();
- }));
+ OP_REQUIRES_OK(
+ ctx,
+ mgr->LookupOrCreate<BigtableClientResource>(
+ cinfo_.container(), cinfo_.name(), &resource,
+ [this, ctx](BigtableClientResource** ret)
+ EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ std::shared_ptr<google::cloud::bigtable::DataClient> client(
+ new BigtableTestClient());
+ // Note: must make explicit copies to sequence
+ // them before the move of client.
+ string project_id = client->project_id();
+ string instance_id = client->instance_id();
+ *ret = new BigtableClientResource(std::move(project_id),
+ std::move(instance_id),
+ std::move(client));
+ return Status::OK();
+ }));
initialized_ = true;
}
OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput(
diff --git a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc
index bd362f7de5..d6b3964719 100644
--- a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc
+++ b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc
@@ -21,34 +21,37 @@ namespace tensorflow {
namespace {
void WriteCell(const string& row, const string& family, const string& column,
- const string& value, ::bigtable::noex::Table* table) {
- ::bigtable::SingleRowMutation mut(row);
- mut.emplace_back(::bigtable::SetCell(family, column, value));
+ const string& value,
+ ::google::cloud::bigtable::noex::Table* table) {
+ ::google::cloud::bigtable::SingleRowMutation mut(row);
+ mut.emplace_back(::google::cloud::bigtable::SetCell(family, column, value));
table->Apply(std::move(mut));
}
TEST(BigtableTestClientTest, EmptyRowRead) {
- std::shared_ptr<::bigtable::DataClient> client_ptr =
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
std::make_shared<BigtableTestClient>();
- ::bigtable::noex::Table table(client_ptr, "test_table");
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
- ::bigtable::RowSet rowset;
+ ::google::cloud::bigtable::RowSet rowset;
rowset.Append("r1");
- auto filter = ::bigtable::Filter::Chain(::bigtable::Filter::Latest(1));
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
auto rows = table.ReadRows(std::move(rowset), filter);
EXPECT_EQ(rows.begin(), rows.end()) << "Some rows were returned in response!";
EXPECT_TRUE(rows.Finish().ok()) << "Error reading rows.";
}
TEST(BigtableTestClientTest, SingleRowWriteAndRead) {
- std::shared_ptr<::bigtable::DataClient> client_ptr =
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
std::make_shared<BigtableTestClient>();
- ::bigtable::noex::Table table(client_ptr, "test_table");
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
WriteCell("r1", "f1", "c1", "v1", &table);
- ::bigtable::RowSet rowset("r1");
- auto filter = ::bigtable::Filter::Chain(::bigtable::Filter::Latest(1));
+ ::google::cloud::bigtable::RowSet rowset("r1");
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
auto rows = table.ReadRows(std::move(rowset), filter);
auto itr = rows.begin();
EXPECT_NE(itr, rows.end()) << "No rows were returned in response!";
@@ -64,16 +67,17 @@ TEST(BigtableTestClientTest, SingleRowWriteAndRead) {
}
TEST(BigtableTestClientTest, MultiRowWriteAndSingleRowRead) {
- std::shared_ptr<::bigtable::DataClient> client_ptr =
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
std::make_shared<BigtableTestClient>();
- ::bigtable::noex::Table table(client_ptr, "test_table");
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
WriteCell("r1", "f1", "c1", "v1", &table);
WriteCell("r2", "f1", "c1", "v2", &table);
WriteCell("r3", "f1", "c1", "v3", &table);
- ::bigtable::RowSet rowset("r1");
- auto filter = ::bigtable::Filter::Chain(::bigtable::Filter::Latest(1));
+ ::google::cloud::bigtable::RowSet rowset("r1");
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
auto rows = table.ReadRows(std::move(rowset), filter);
auto itr = rows.begin();
@@ -90,16 +94,17 @@ TEST(BigtableTestClientTest, MultiRowWriteAndSingleRowRead) {
}
TEST(BigtableTestClientTest, MultiRowWriteAndRead) {
- std::shared_ptr<::bigtable::DataClient> client_ptr =
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
std::make_shared<BigtableTestClient>();
- ::bigtable::noex::Table table(client_ptr, "test_table");
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
WriteCell("r1", "f1", "c1", "v1", &table);
WriteCell("r2", "f1", "c1", "v2", &table);
WriteCell("r3", "f1", "c1", "v3", &table);
- ::bigtable::RowSet rowset("r1", "r2", "r3");
- auto filter = ::bigtable::Filter::Chain(::bigtable::Filter::Latest(1));
+ ::google::cloud::bigtable::RowSet rowset("r1", "r2", "r3");
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
auto rows = table.ReadRows(std::move(rowset), filter);
auto itr = rows.begin();
@@ -134,16 +139,18 @@ TEST(BigtableTestClientTest, MultiRowWriteAndRead) {
}
TEST(BigtableTestClientTest, MultiRowWriteAndPrefixRead) {
- std::shared_ptr<::bigtable::DataClient> client_ptr =
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
std::make_shared<BigtableTestClient>();
- ::bigtable::noex::Table table(client_ptr, "test_table");
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
WriteCell("r1", "f1", "c1", "v1", &table);
WriteCell("r2", "f1", "c1", "v2", &table);
WriteCell("r3", "f1", "c1", "v3", &table);
- auto filter = ::bigtable::Filter::Chain(::bigtable::Filter::Latest(1));
- auto rows = table.ReadRows(::bigtable::RowRange::Prefix("r"), filter);
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
+ auto rows =
+ table.ReadRows(::google::cloud::bigtable::RowRange::Prefix("r"), filter);
auto itr = rows.begin();
EXPECT_NE(itr, rows.end()) << "Missing rows";
@@ -177,9 +184,9 @@ TEST(BigtableTestClientTest, MultiRowWriteAndPrefixRead) {
}
TEST(BigtableTestClientTest, ColumnFiltering) {
- std::shared_ptr<::bigtable::DataClient> client_ptr =
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
std::make_shared<BigtableTestClient>();
- ::bigtable::noex::Table table(client_ptr, "test_table");
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
WriteCell("r1", "f1", "c1", "v1", &table);
WriteCell("r2", "f1", "c1", "v2", &table);
@@ -190,10 +197,12 @@ TEST(BigtableTestClientTest, ColumnFiltering) {
WriteCell("r2", "f2", "c1", "v2", &table);
WriteCell("r3", "f1", "c2", "v3", &table);
- auto filter = ::bigtable::Filter::Chain(
- ::bigtable::Filter::Latest(1), ::bigtable::Filter::FamilyRegex("f1"),
- ::bigtable::Filter::ColumnRegex("c1"));
- auto rows = table.ReadRows(::bigtable::RowRange::Prefix("r"), filter);
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::FamilyRegex("f1"),
+ ::google::cloud::bigtable::Filter::ColumnRegex("c1"));
+ auto rows =
+ table.ReadRows(::google::cloud::bigtable::RowRange::Prefix("r"), filter);
auto itr = rows.begin();
EXPECT_NE(itr, rows.end()) << "Missing rows";
@@ -227,9 +236,9 @@ TEST(BigtableTestClientTest, ColumnFiltering) {
}
TEST(BigtableTestClientTest, RowKeys) {
- std::shared_ptr<::bigtable::DataClient> client_ptr =
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
std::make_shared<BigtableTestClient>();
- ::bigtable::noex::Table table(client_ptr, "test_table");
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
WriteCell("r1", "f1", "c1", "v1", &table);
WriteCell("r2", "f1", "c1", "v2", &table);
@@ -240,10 +249,12 @@ TEST(BigtableTestClientTest, RowKeys) {
WriteCell("r2", "f2", "c1", "v2", &table);
WriteCell("r3", "f1", "c2", "v3", &table);
- auto filter = ::bigtable::Filter::Chain(
- ::bigtable::Filter::Latest(1), ::bigtable::Filter::CellsRowLimit(1),
- ::bigtable::Filter::StripValueTransformer());
- auto rows = table.ReadRows(::bigtable::RowRange::Prefix("r"), filter);
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::CellsRowLimit(1),
+ ::google::cloud::bigtable::Filter::StripValueTransformer());
+ auto rows =
+ table.ReadRows(::google::cloud::bigtable::RowRange::Prefix("r"), filter);
auto itr = rows.begin();
EXPECT_NE(itr, rows.end()) << "Missing rows";
EXPECT_EQ(itr->row_key(), "r1");