author    Yangqing Jia <me@daggerfs.com>  2016-01-28 10:35:14 -0800
committer Yangqing Jia <me@daggerfs.com>  2016-01-28 10:35:14 -0800
commit    c4e47630b16a716d01dc20b36afa8882b03681a1 (patch)
tree      fe2cd8765e7264da3a48712fcbbeddd0733780ef /bench/tensors/tensor_benchmarks.h
parent    4865e1e73265e12d564f8b4d9069a2159f777d90 (diff)
benchmark modifications to make it compilable in a standalone fashion.
Diffstat (limited to 'bench/tensors/tensor_benchmarks.h')
-rw-r--r-- bench/tensors/tensor_benchmarks.h | 87
1 file changed, 49 insertions(+), 38 deletions(-)
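Note: with the Google-internal benchmark library gone, this header is meant to be driven by a small standalone harness. The sketch below shows one plausible way to hook a suite method up to the BENCHMARK_RANGE shim introduced in the diff; the BM_coeffWiseOp wrapper, the (device, size) constructor, and the assumption that benchmark.h provides a BENCHMARK(fn)->Range(lo, hi) facility for void(int, int) functions are illustrative, not part of this commit.

// Hypothetical standalone driver (sketch only, not part of this commit).
#include "tensor_benchmarks.h"

static void BM_coeffWiseOp(int iters, int N) {
  Eigen::DefaultDevice device;
  // Assumed constructor: a device plus a single size for square problems.
  BenchmarkSuite<Eigen::DefaultDevice> suite(device, N);
  suite.coeffWiseOp(iters);
}
BENCHMARK_RANGE(BM_coeffWiseOp, 10, 5000);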
diff --git a/bench/tensors/tensor_benchmarks.h b/bench/tensors/tensor_benchmarks.h
index 525b9acda..a1696afda 100644
--- a/bench/tensors/tensor_benchmarks.h
+++ b/bench/tensors/tensor_benchmarks.h
@@ -4,12 +4,23 @@
typedef int TensorIndex;
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
-#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
-#include "testing/base/public/benchmark.h"
+#include "unsupported/Eigen/CXX11/Tensor"
+#include "benchmark.h"
+
+#define BENCHMARK_RANGE(bench, lo, hi) \
+ BENCHMARK(bench)->Range(lo, hi)
+
+template <typename... Args>
+std::string StrCat(const Args... args) {
+ std::stringstream ss;
+ StrCatRecursive(ss, args...);
+ return ss.str();
+}
using Eigen::Tensor;
using Eigen::TensorMap;
+typedef int64_t int64;
// TODO(bsteiner): also templatize on the input type since we have users
// for int8 as well as floats.
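Note: the StrCat shim added above forwards to a StrCatRecursive helper that lies outside this hunk (as do the <sstream>/<string> includes it needs). A minimal sketch of such a helper, assuming plain variadic recursion over a std::stringstream, could look like this:

// Sketch of the StrCatRecursive helper referenced by StrCat; the actual
// definition in the file is not shown in this hunk, so treat this as an
// assumption rather than the committed code.
#include <sstream>
#include <string>

inline void StrCatRecursive(std::stringstream&) {}

template <typename T, typename... Args>
void StrCatRecursive(std::stringstream& ss, const T& t, const Args&... args) {
  ss << t;                       // append the first argument
  StrCatRecursive(ss, args...);  // recurse on the remaining arguments
}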
@@ -43,7 +54,7 @@ template <typename Device> class BenchmarkSuite {
void random(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
- const Eigen::array<TensorIndex, 2> sizes(m_, m_);
+ const Eigen::array<TensorIndex, 2> sizes = {{m_, m_}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, sizes);
StartBenchmarkTiming();
@@ -56,16 +67,16 @@ template <typename Device> class BenchmarkSuite {
void slicing(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
- const Eigen::array<TensorIndex, 2> sizes(m_, m_);
+ const Eigen::array<TensorIndex, 2> sizes = {{m_, m_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, sizes);
const TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, sizes);
- const Eigen::DSizes<TensorIndex, 2> quarter_sizes(Eigen::array<TensorIndex, 2>(m_/2, m_/2));
- const Eigen::DSizes<TensorIndex, 2> first_quadrant(Eigen::array<TensorIndex, 2>(0, 0));
- const Eigen::DSizes<TensorIndex, 2> second_quadrant(Eigen::array<TensorIndex, 2>(0, m_/2));
- const Eigen::DSizes<TensorIndex, 2> third_quadrant(Eigen::array<TensorIndex, 2>(m_/2, 0));
- const Eigen::DSizes<TensorIndex, 2> fourth_quadrant(Eigen::array<TensorIndex, 2>(m_/2, m_/2));
+ const Eigen::DSizes<TensorIndex, 2> quarter_sizes(m_/2, m_/2);
+ const Eigen::DSizes<TensorIndex, 2> first_quadrant(0, 0);
+ const Eigen::DSizes<TensorIndex, 2> second_quadrant(0, m_/2);
+ const Eigen::DSizes<TensorIndex, 2> third_quadrant(m_/2, 0);
+ const Eigen::DSizes<TensorIndex, 2> fourth_quadrant(m_/2, m_/2);
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
@@ -85,12 +96,12 @@ template <typename Device> class BenchmarkSuite {
void shuffling(int num_iters) {
eigen_assert(m_ == n_);
- const Eigen::array<TensorIndex, 2> size_a(m_, k_);
+ const Eigen::array<TensorIndex, 2> size_a = {{m_, k_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, size_a);
- const Eigen::array<TensorIndex, 2> size_b(k_, m_);
+ const Eigen::array<TensorIndex, 2> size_b = {{k_, m_}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, size_b);
- const Eigen::array<int, 2> shuffle(1, 0);
+ const Eigen::array<int, 2> shuffle = {{1, 0}};
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
@@ -102,9 +113,9 @@ template <typename Device> class BenchmarkSuite {
void padding(int num_iters) {
eigen_assert(m_ == k_);
- const Eigen::array<TensorIndex, 2> size_a(m_, k_-3);
+ const Eigen::array<TensorIndex, 2> size_a = {{m_, k_-3}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, size_a);
- const Eigen::array<TensorIndex, 2> size_b(k_, m_);
+ const Eigen::array<TensorIndex, 2> size_b = {{k_, m_}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, size_b);
Eigen::array<Eigen::IndexPair<TensorIndex>, 2> paddings;
@@ -121,12 +132,12 @@ template <typename Device> class BenchmarkSuite {
void striding(int num_iters) {
eigen_assert(m_ == k_);
- const Eigen::array<TensorIndex, 2> size_a(m_, k_);
+ const Eigen::array<TensorIndex, 2> size_a = {{m_, k_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, size_a);
- const Eigen::array<TensorIndex, 2> size_b(m_, k_ / 2);
+ const Eigen::array<TensorIndex, 2> size_b = {{m_, k_ / 2}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, size_b);
- const Eigen::array<TensorIndex, 2> strides(1, 2);
+ const Eigen::array<TensorIndex, 2> strides = {{1, 2}};
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
@@ -137,14 +148,14 @@ template <typename Device> class BenchmarkSuite {
}
void broadcasting(int num_iters) {
- const Eigen::array<TensorIndex, 2> size_a(m_, 1);
+ const Eigen::array<TensorIndex, 2> size_a = {{m_, 1}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, size_a);
- const Eigen::array<TensorIndex, 2> size_c(m_, n_);
+ const Eigen::array<TensorIndex, 2> size_c = {{m_, n_}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, size_c);
-#if defined(__CUDACC__)
+#ifndef EIGEN_HAS_INDEX_LIST
// nvcc doesn't support cxx11
- const Eigen::array<int, 2> broadcast(1, n_);
+ const Eigen::array<int, 2> broadcast = {{1, n_}};
#else
// Take advantage of cxx11 to give the compiler information it can use to
// optimize the code.
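Note: the #else branch continues beyond this hunk; presumably it builds the broadcast specification with Eigen::IndexList so that the unit dimension is known at compile time, as the comment above suggests. A sketch of that pattern (an assumption, since those lines are not shown here):

// Illustrative sketch of a compile-time broadcast spec; make_broadcast is a
// hypothetical wrapper, not a function from the benchmark.
#include "unsupported/Eigen/CXX11/Tensor"

inline Eigen::IndexList<Eigen::type2index<1>, int> make_broadcast(int n) {
  Eigen::IndexList<Eigen::type2index<1>, int> broadcast;
  broadcast.set(1, n);  // dimension 0 is fixed to 1 at compile time
  return broadcast;
}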
@@ -162,7 +173,7 @@ template <typename Device> class BenchmarkSuite {
void coeffWiseOp(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
- const Eigen::array<TensorIndex, 2> sizes(m_, m_);
+ const Eigen::array<TensorIndex, 2> sizes = {{m_, m_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, sizes);
const TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, sizes);
@@ -178,7 +189,7 @@ template <typename Device> class BenchmarkSuite {
void algebraicFunc(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
- const Eigen::array<TensorIndex, 2> sizes(m_, m_);
+ const Eigen::array<TensorIndex, 2> sizes = {{m_, m_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, sizes);
const TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, sizes);
@@ -194,7 +205,7 @@ template <typename Device> class BenchmarkSuite {
void transcendentalFunc(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
- const Eigen::array<TensorIndex, 2> sizes(m_, m_);
+ const Eigen::array<TensorIndex, 2> sizes = {{m_, m_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, sizes);
const TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, sizes);
@@ -210,12 +221,12 @@ template <typename Device> class BenchmarkSuite {
// Simple reduction
void reduction(int num_iters) {
- const Eigen::array<TensorIndex, 2> input_size(k_, n_);
+ const Eigen::array<TensorIndex, 2> input_size = {{k_, n_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, input_size);
- const Eigen::array<TensorIndex, 1> output_size(n_);
+ const Eigen::array<TensorIndex, 1> output_size = {{n_}};
TensorMap<Tensor<float, 1>, Eigen::Aligned> C(c_, output_size);
- const Eigen::array<TensorIndex, 1> sum_along_dim(0);
+ const Eigen::array<TensorIndex, 1> sum_along_dim = {{0}};
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
@@ -228,16 +239,16 @@ template <typename Device> class BenchmarkSuite {
// do a contraction which is equivalent to a matrix multiplication
void contraction(int num_iters) {
- const Eigen::array<TensorIndex, 2> sizeA(m_, k_);
- const Eigen::array<TensorIndex, 2> sizeB(k_, n_);
- const Eigen::array<TensorIndex, 2> sizeC(m_, n_);
+ const Eigen::array<TensorIndex, 2> sizeA = {{m_, k_}};
+ const Eigen::array<TensorIndex, 2> sizeB = {{k_, n_}};
+ const Eigen::array<TensorIndex, 2> sizeC = {{m_, n_}};
const TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, sizeA);
const TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, sizeB);
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, sizeC);
typedef typename Tensor<float, 2>::DimensionPair DimPair;
- const Eigen::array<DimPair, 1> dims(DimPair(1, 0));
+ const Eigen::array<DimPair, 1> dims = {{DimPair(1, 0)}};
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
@@ -249,14 +260,14 @@ template <typename Device> class BenchmarkSuite {
}
void convolution(int num_iters, int kernel_x, int kernel_y) {
- const Eigen::array<TensorIndex, 2> input_sizes(m_, n_);
+ const Eigen::array<TensorIndex, 2> input_sizes = {{m_, n_}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> A(a_, input_sizes);
- const Eigen::array<TensorIndex, 2> kernel_sizes(kernel_x, kernel_y);
+ const Eigen::array<TensorIndex, 2> kernel_sizes = {{kernel_x, kernel_y}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> B(b_, kernel_sizes);
- const Eigen::array<TensorIndex, 2> result_sizes(
- m_ - kernel_x + 1, n_ - kernel_y + 1);
+ const Eigen::array<TensorIndex, 2> result_sizes =
+ {{m_ - kernel_x + 1, n_ - kernel_y + 1}};
TensorMap<Tensor<float, 2>, Eigen::Aligned> C(c_, result_sizes);
- Eigen::array<Tensor<float, 2>::Index, 2> dims(0, 1);
+ Eigen::array<Tensor<float, 2>::Index, 2> dims = {{0, 1}};
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
@@ -280,7 +291,7 @@ template <typename Device> class BenchmarkSuite {
device_.memset(b_, 23, k_ * n_ * sizeof(float));
device_.memset(c_, 31, m_ * n_ * sizeof(float));
- BenchmarkUseRealTime();
+ //BenchmarkUseRealTime();
}
inline void finalizeBenchmark(int64 num_items) {
@@ -290,7 +301,7 @@ template <typename Device> class BenchmarkSuite {
}
#endif
StopBenchmarkTiming();
- SetBenchmarkItemsProcessed(num_items);
+ SetBenchmarkBytesProcessed(num_items);
}
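Note: the recurring change in this patch is mechanical: Eigen::array values are now built with aggregate (double-brace) initialization instead of multi-argument constructor calls, so the header compiles even when Eigen::array is just a std::array-style aggregate. A minimal standalone illustration of the new form (the function name and dimensions are arbitrary, not taken from the benchmark):

// Standalone illustration of the initialization style used throughout the
// patch; fill_square is a made-up example, not benchmark code.
#include "unsupported/Eigen/CXX11/Tensor"

void fill_square(float* data, Eigen::DenseIndex m) {
  // Old form relied on a non-standard Eigen::array constructor:
  //   const Eigen::array<Eigen::DenseIndex, 2> sizes(m, m);
  // New form uses aggregate initialization, which any C++11 compiler accepts:
  const Eigen::array<Eigen::DenseIndex, 2> sizes = {{m, m}};
  Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> C(data, sizes);
  C.setConstant(1.0f);
}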