author    A. Unique TensorFlower <gardener@tensorflow.org>  2016-06-29 17:17:39 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>   2016-06-29 18:31:33 -0700
commit    aae01a1dea3a8345dc91f03df0baf1c6f8b1da23
tree      0dbb7409a9e067e523bcf8e6ed99a8cf3307cf13 /tensorflow/contrib
parent    025e65591ef201118f1490c28f0f5df89cbf5104
Fix sdca_ops_test benchmark so that it works with the updated SdcaSolver args.
Add a benchmark for sparse data with many values. It currently performs poorly; optimization to follow separately.
Change: 126255533
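For readers skimming the diff below: the updated SdcaSolver signature drops the container/solver_uuid attributes and the ids input, and instead takes a zero-initialized example_state_data tensor of shape {num_examples, 4}. The following is only a minimal sketch of building that input (the helper name and includes are assumptions, not part of this change); in the benchmark itself the same thing is done on the graph via the Zeros(g, TensorShape({num_examples, 4})) node shown in the diff.

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"

// Hypothetical helper (not in the diff): builds the zero-initialized
// per-example state tensor that the updated SdcaSolver takes as its last
// input, matching Zeros(g, TensorShape({num_examples, 4})) below.
tensorflow::Tensor MakeExampleStateData(int num_examples) {
  tensorflow::Tensor state(tensorflow::DT_FLOAT,
                           tensorflow::TensorShape({num_examples, 4}));
  state.flat<float>().setZero();
  return state;
}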
Diffstat (limited to 'tensorflow/contrib')
-rw-r--r--  tensorflow/contrib/linear_optimizer/kernels/BUILD             |  1
-rw-r--r--  tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc  | 59
2 files changed, 34 insertions, 26 deletions
diff --git a/tensorflow/contrib/linear_optimizer/kernels/BUILD b/tensorflow/contrib/linear_optimizer/kernels/BUILD
index 1f68065c60..0fb037f5de 100644
--- a/tensorflow/contrib/linear_optimizer/kernels/BUILD
+++ b/tensorflow/contrib/linear_optimizer/kernels/BUILD
@@ -58,6 +58,7 @@ tf_cc_test(
linkstatic = tf_kernel_tests_linkstatic(), # Required for benchmarking
deps = [
"//tensorflow/contrib/linear_optimizer:sdca_op_kernels",
+ "//tensorflow/contrib/linear_optimizer:sdca_ops_op_lib",
"//tensorflow/core:all_kernels",
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
diff --git a/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc b/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc
index ae50553a60..d776170367 100644
--- a/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc
+++ b/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc
@@ -51,12 +51,14 @@ std::vector<Node*> VarVector(Graph* const g, const int nodes,
return result;
}
-Node* Zeros(Graph* const g, const int n) {
- Tensor data(DT_FLOAT, TensorShape({n}));
+Node* Zeros(Graph* const g, const TensorShape& shape) {
+ Tensor data(DT_FLOAT, shape);
data.flat<float>().setZero();
return test::graph::Constant(g, data);
}
+Node* Zeros(Graph* const g, const int n) { return Zeros(g, TensorShape({n})); }
+
Node* Ones(Graph* const g, const int n) {
Tensor data(DT_FLOAT, TensorShape({n}));
test::FillFn<float>(&data, [](const int i) { return 1.0f; });
@@ -166,28 +168,25 @@ void GetGraphs(const int32 num_examples, const int32 sparse_feature_groups,
Node* const weights = Ones(g, num_examples);
Node* const labels = RandomZeroOrOne(g, num_examples);
- Node* const ids = StringIota(g, num_examples);
+ Node* const example_state_data = Zeros(g, TensorShape({num_examples, 4}));
Node* sdca = nullptr;
- TF_CHECK_OK(
- NodeBuilder(g->NewName("sdca"), "SdcaSolver")
- .Attr("loss_type", "logistic_loss")
- .Attr("num_sparse_features", sparse_feature_groups)
- .Attr("num_dense_features", dense_feature_groups)
- .Attr("l1", 0.0)
- .Attr("l2", 1.0)
- .Attr("num_inner_iterations", 2)
- .Attr("container", strings::StrCat(strings::Hex(random::New64())))
- .Attr("solver_uuid", strings::StrCat(strings::Hex(random::New64())))
- .Input(sparse_indices)
- .Input(sparse_values)
- .Input(dense_features)
- .Input(weights)
- .Input(labels)
- .Input(ids)
- .Input(sparse_weights)
- .Input(dense_weights)
- .Finalize(g, &sdca));
+ TF_CHECK_OK(NodeBuilder(g->NewName("sdca"), "SdcaSolver")
+ .Attr("loss_type", "logistic_loss")
+ .Attr("num_sparse_features", sparse_feature_groups)
+ .Attr("num_dense_features", dense_feature_groups)
+ .Attr("l1", 0.0)
+ .Attr("l2", 1.0)
+ .Attr("num_inner_iterations", 2)
+ .Input(sparse_indices)
+ .Input(sparse_values)
+ .Input(dense_features)
+ .Input(weights)
+ .Input(labels)
+ .Input(sparse_weights)
+ .Input(dense_weights)
+ .Input(example_state_data)
+ .Finalize(g, &sdca));
*train_g = g;
}
@@ -202,14 +201,22 @@ void BM_SDCA(const int iters, const int num_examples) {
&train);
testing::StartTiming();
test::Benchmark("cpu", train, GetOptions(), init).Run(iters);
- // TODO(sibyl-toe9oF2e): Each all to Run() currently creates a container which
- // gets deleted as the context gets deleted. It would be nicer to
- // explicitly clean up the container ourselves at this point (after calling
- // testing::StopTiming).
+}
+
+void BM_SDCA_LARGE_SPARSE(const int iters, const int num_examples) {
+ testing::StopTiming();
+ Graph* init = nullptr;
+ Graph* train = nullptr;
+ GetGraphs(num_examples, 65 /* sparse feature groups */,
+ 1e6 /* sparse features per group */, 0 /* dense features */, &init,
+ &train);
+ testing::StartTiming();
+ test::Benchmark("cpu", train, GetOptions(), init).Run(iters);
}
} // namespace
BENCHMARK(BM_SDCA)->Arg(128)->Arg(256)->Arg(512)->Arg(1024);
+BENCHMARK(BM_SDCA_LARGE_SPARSE)->Arg(128)->Arg(256)->Arg(512)->Arg(1024);
} // namespace tensorflow
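As a usage note, further size variants would follow the same pattern as BM_SDCA_LARGE_SPARSE above. The sketch below is purely illustrative: BM_SDCA_MEDIUM_SPARSE is not part of this change, the GetGraphs argument meanings are inferred from the call sites in the diff, and if added it would sit inside the anonymous namespace of sdca_ops_test.cc next to the existing benchmarks.

// Hypothetical mid-size sparse variant, mirroring the benchmarks above;
// the 1e4 features-per-group value is an arbitrary illustration.
void BM_SDCA_MEDIUM_SPARSE(const int iters, const int num_examples) {
  testing::StopTiming();
  Graph* init = nullptr;
  Graph* train = nullptr;
  GetGraphs(num_examples, 65 /* sparse feature groups */,
            1e4 /* sparse features per group */, 0 /* dense features */, &init,
            &train);
  testing::StartTiming();
  test::Benchmark("cpu", train, GetOptions(), init).Run(iters);
}
BENCHMARK(BM_SDCA_MEDIUM_SPARSE)->Arg(128)->Arg(256)->Arg(512)->Arg(1024);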