author     Benoit Steiner <benoit.steiner.goog@gmail.com>   2014-09-04 20:27:28 -0700
committer  Benoit Steiner <benoit.steiner.goog@gmail.com>   2014-09-04 20:27:28 -0700
commit     1abe4ed14c0012d85e833c5f507f282cf26edc36 (patch)
tree       c9d8e8fc6f6fdcba6d3101a2e3baf5634ebffd8c
parent     d43f737b4ad52e84a3b4d954d9bfb4c40cf9e819 (diff)
Created more regression tests
-rw-r--r--  test/main.h                                       1
-rw-r--r--  unsupported/test/cxx11_tensor_assign.cpp         26
-rw-r--r--  unsupported/test/cxx11_tensor_contraction.cpp   166
-rw-r--r--  unsupported/test/cxx11_tensor_device.cpp        279
-rw-r--r--  unsupported/test/cxx11_tensor_shuffling.cpp      47
-rw-r--r--  unsupported/test/cxx11_tensor_simple.cpp         26
6 files changed, 510 insertions, 35 deletions
diff --git a/test/main.h b/test/main.h
index 3295dcb71..763cec8f9 100644
--- a/test/main.h
+++ b/test/main.h
@@ -207,6 +207,7 @@ inline void verify_impl(bool condition, const char *testname, const char *file,
#define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a))
#define VERIFY_IS_EQUAL(a, b) VERIFY(test_is_equal(a, b))
+#define VERIFY_IS_NOT_EQUAL(a, b) VERIFY(!test_is_equal(a, b))
#define VERIFY_IS_APPROX(a, b) VERIFY(test_isApprox(a, b))
#define VERIFY_IS_NOT_APPROX(a, b) VERIFY(!test_isApprox(a, b))
#define VERIFY_IS_MUCH_SMALLER_THAN(a, b) VERIFY(test_isMuchSmallerThan(a, b))
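The new VERIFY_IS_NOT_EQUAL macro is the exact-inequality counterpart of VERIFY_IS_EQUAL; further down in this commit, test_resize() in cxx11_tensor_simple.cpp uses it to check that a growing resize reallocates. A standalone sketch of the same kind of check, with hypothetical stand-in names (my_is_equal, MY_VERIFY_IS_NOT_EQUAL) so it compiles outside test/main.h:

// Hedged, standalone sketch of what VERIFY_IS_NOT_EQUAL asserts: exact
// inequality, i.e. the negation of an exact-equality helper. The names below
// are stand-ins, not the Eigen test-harness API.
#include <cassert>
#include <vector>

template <typename T, typename U>
static bool my_is_equal(const T& a, const U& b) { return a == b; }

#define MY_VERIFY_IS_NOT_EQUAL(a, b) assert(!my_is_equal(a, b))

int main() {
  std::vector<int> v(10);
  const int* before = v.data();
  v.resize(1000);                              // forces a reallocation
  MY_VERIFY_IS_NOT_EQUAL(v.data(), before);    // the data pointer must now differ,
                                               // the same kind of check test_resize() makes
  return 0;
}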
diff --git a/unsupported/test/cxx11_tensor_assign.cpp b/unsupported/test/cxx11_tensor_assign.cpp
index b024bed19..f2b126413 100644
--- a/unsupported/test/cxx11_tensor_assign.cpp
+++ b/unsupported/test/cxx11_tensor_assign.cpp
@@ -228,6 +228,30 @@ static void test_same_type()
}
}
+static void test_auto_resize()
+{
+ Tensor<int, 1> tensor1;
+ Tensor<int, 1> tensor2(3);
+ Tensor<int, 1> tensor3(5);
+ Tensor<int, 1> tensor4(7);
+
+ Tensor<int, 1> new_tensor(5);
+ new_tensor.setRandom();
+
+ tensor1 = tensor2 = tensor3 = tensor4 = new_tensor;
+
+ VERIFY_IS_EQUAL(tensor1.dimension(0), new_tensor.dimension(0));
+ VERIFY_IS_EQUAL(tensor2.dimension(0), new_tensor.dimension(0));
+ VERIFY_IS_EQUAL(tensor3.dimension(0), new_tensor.dimension(0));
+ VERIFY_IS_EQUAL(tensor4.dimension(0), new_tensor.dimension(0));
+ for (int i = 0; i < new_tensor.dimension(0); ++i) {
+ VERIFY_IS_EQUAL(tensor1(i), new_tensor(i));
+ VERIFY_IS_EQUAL(tensor2(i), new_tensor(i));
+ VERIFY_IS_EQUAL(tensor3(i), new_tensor(i));
+ VERIFY_IS_EQUAL(tensor4(i), new_tensor(i));
+ }
+}
+
void test_cxx11_tensor_assign()
{
@@ -235,4 +259,6 @@ void test_cxx11_tensor_assign()
CALL_SUBTEST(test_2d());
CALL_SUBTEST(test_3d());
CALL_SUBTEST(test_same_type());
+ CALL_SUBTEST(test_auto_resize());
+
}
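What test_auto_resize() exercises: assigning a tensor expression to a Tensor resizes the destination to the source's dimensions, even through a chain of assignments. A minimal standalone sketch of that behaviour, assuming only that the unsupported CXX11 Tensor module is on the include path:

// Hedged sketch: assignment resizes the (here empty) destination to match the
// source, then copies the coefficients.
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<int, 1> dst;        // default-constructed, size 0
  Eigen::Tensor<int, 1> src(5);
  src.setRandom();

  dst = src;                        // dst is resized to src's size

  assert(dst.dimension(0) == 5);
  for (int i = 0; i < 5; ++i) {
    assert(dst(i) == src(i));
  }
  return 0;
}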
diff --git a/unsupported/test/cxx11_tensor_contraction.cpp b/unsupported/test/cxx11_tensor_contraction.cpp
index fc67d500b..a37fcd967 100644
--- a/unsupported/test/cxx11_tensor_contraction.cpp
+++ b/unsupported/test/cxx11_tensor_contraction.cpp
@@ -141,6 +141,66 @@ static void test_multidims()
}
+static void test_holes() {
+ Tensor<float, 4> t1(2, 5, 7, 3);
+ Tensor<float, 5> t2(2, 7, 11, 13, 3);
+ t1.setRandom();
+ t2.setRandom();
+
+ Eigen::array<DimPair, 2> dims({{DimPair(0, 0), DimPair(3, 4)}});
+ Tensor<float, 5> result = t1.contract(t2, dims);
+ VERIFY_IS_EQUAL(result.dimension(0), 5);
+ VERIFY_IS_EQUAL(result.dimension(1), 7);
+ VERIFY_IS_EQUAL(result.dimension(2), 7);
+ VERIFY_IS_EQUAL(result.dimension(3), 11);
+ VERIFY_IS_EQUAL(result.dimension(4), 13);
+
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 5; ++l) {
+ for (int m = 0; m < 5; ++m) {
+ VERIFY_IS_APPROX(result(i, j, k, l, m),
+ t1(0, i, j, 0) * t2(0, k, l, m, 0) +
+ t1(1, i, j, 0) * t2(1, k, l, m, 0) +
+ t1(0, i, j, 1) * t2(0, k, l, m, 1) +
+ t1(1, i, j, 1) * t2(1, k, l, m, 1) +
+ t1(0, i, j, 2) * t2(0, k, l, m, 2) +
+ t1(1, i, j, 2) * t2(1, k, l, m, 2));
+ }
+ }
+ }
+ }
+ }
+}
+
+
+static void test_full_redux()
+{
+ Tensor<float, 2> t1(2, 2);
+ Tensor<float, 3> t2(2, 2, 2);
+ t1.setRandom();
+ t2.setRandom();
+
+ Eigen::array<DimPair, 2> dims({{DimPair(0, 0), DimPair(1, 1)}});
+ Tensor<float, 1> result = t1.contract(t2, dims);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_APPROX(result(0), t1(0, 0) * t2(0, 0, 0) + t1(1, 0) * t2(1, 0, 0)
+ + t1(0, 1) * t2(0, 1, 0) + t1(1, 1) * t2(1, 1, 0));
+ VERIFY_IS_APPROX(result(1), t1(0, 0) * t2(0, 0, 1) + t1(1, 0) * t2(1, 0, 1)
+ + t1(0, 1) * t2(0, 1, 1) + t1(1, 1) * t2(1, 1, 1));
+
+ dims[0] = DimPair(1, 0);
+ dims[1] = DimPair(2, 1);
+ result = t2.contract(t1, dims);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_APPROX(result(0), t1(0, 0) * t2(0, 0, 0) + t1(1, 0) * t2(0, 1, 0)
+ + t1(0, 1) * t2(0, 0, 1) + t1(1, 1) * t2(0, 1, 1));
+ VERIFY_IS_APPROX(result(1), t1(0, 0) * t2(1, 0, 0) + t1(1, 0) * t2(1, 1, 0)
+ + t1(0, 1) * t2(1, 0, 1) + t1(1, 1) * t2(1, 1, 1));
+}
+
+
static void test_expr()
{
Tensor<float, 2> mat1(2, 3);
@@ -160,10 +220,116 @@ static void test_expr()
}
+static void test_out_of_order_contraction()
+{
+ Tensor<float, 3> mat1(2, 2, 2);
+ Tensor<float, 3> mat2(2, 2, 2);
+
+ mat1.setRandom();
+ mat2.setRandom();
+
+ Tensor<float, 2> mat3(2, 2);
+
+ Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(0, 2)}});
+ mat3 = mat1.contract(mat2, dims);
+
+ VERIFY_IS_APPROX(mat3(0, 0),
+ mat1(0,0,0)*mat2(0,0,0) + mat1(1,0,0)*mat2(0,0,1) +
+ mat1(0,0,1)*mat2(1,0,0) + mat1(1,0,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(1, 0),
+ mat1(0,1,0)*mat2(0,0,0) + mat1(1,1,0)*mat2(0,0,1) +
+ mat1(0,1,1)*mat2(1,0,0) + mat1(1,1,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(0, 1),
+ mat1(0,0,0)*mat2(0,1,0) + mat1(1,0,0)*mat2(0,1,1) +
+ mat1(0,0,1)*mat2(1,1,0) + mat1(1,0,1)*mat2(1,1,1));
+ VERIFY_IS_APPROX(mat3(1, 1),
+ mat1(0,1,0)*mat2(0,1,0) + mat1(1,1,0)*mat2(0,1,1) +
+ mat1(0,1,1)*mat2(1,1,0) + mat1(1,1,1)*mat2(1,1,1));
+
+ Eigen::array<DimPair, 2> dims2({{DimPair(0, 2), DimPair(2, 0)}});
+ mat3 = mat1.contract(mat2, dims2);
+
+ VERIFY_IS_APPROX(mat3(0, 0),
+ mat1(0,0,0)*mat2(0,0,0) + mat1(1,0,0)*mat2(0,0,1) +
+ mat1(0,0,1)*mat2(1,0,0) + mat1(1,0,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(1, 0),
+ mat1(0,1,0)*mat2(0,0,0) + mat1(1,1,0)*mat2(0,0,1) +
+ mat1(0,1,1)*mat2(1,0,0) + mat1(1,1,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(0, 1),
+ mat1(0,0,0)*mat2(0,1,0) + mat1(1,0,0)*mat2(0,1,1) +
+ mat1(0,0,1)*mat2(1,1,0) + mat1(1,0,1)*mat2(1,1,1));
+ VERIFY_IS_APPROX(mat3(1, 1),
+ mat1(0,1,0)*mat2(0,1,0) + mat1(1,1,0)*mat2(0,1,1) +
+ mat1(0,1,1)*mat2(1,1,0) + mat1(1,1,1)*mat2(1,1,1));
+
+}
+
+
+static void test_consistency()
+{
+ // this does something like testing (A*B)^T = (B^T * A^T)
+
+ Tensor<float, 3> mat1(4, 3, 5);
+ Tensor<float, 5> mat2(3, 2, 1, 5, 4);
+ mat1.setRandom();
+ mat2.setRandom();
+
+ Tensor<float, 4> mat3(5, 2, 1, 5);
+ Tensor<float, 4> mat4(2, 1, 5, 5);
+
+ // contract on dimensions of size 4 and 3
+ Eigen::array<DimPair, 2> dims1({{DimPair(0, 4), DimPair(1, 0)}});
+ Eigen::array<DimPair, 2> dims2({{DimPair(4, 0), DimPair(0, 1)}});
+
+ mat3 = mat1.contract(mat2, dims1);
+ mat4 = mat2.contract(mat1, dims2);
+
+ // check that these are equal except for ordering of dimensions
+ for (size_t i = 0; i < 5; i++) {
+ for (size_t j = 0; j < 10; j++) {
+ VERIFY_IS_APPROX(mat3.data()[i + 5 * j], mat4.data()[j + 10 * i]);
+ }
+ }
+}
+
+
+static void test_large_contraction()
+{
+ Tensor<float, 4> t_left(30, 50, 8, 31);
+ Tensor<float, 5> t_right(8, 31, 7, 20, 10);
+ Tensor<float, 5> t_result(30, 50, 7, 20, 10);
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ typedef Map<MatrixXf> MapXf;
+ MapXf m_left(t_left.data(), 1500, 248);
+ MapXf m_right(t_right.data(), 248, 1400);
+ MatrixXf m_result(1500, 1400);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(3, 1)}});
+
+ // compute results by separate methods
+ t_result = t_left.contract(t_right, dims);
+ m_result = m_left * m_right;
+
+ for (size_t i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY(&t_result.data()[i] != &m_result.data()[i]);
+ VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]);
+ }
+}
+
+
void test_cxx11_tensor_contraction()
{
CALL_SUBTEST(test_evals());
CALL_SUBTEST(test_scalar());
CALL_SUBTEST(test_multidims());
+ CALL_SUBTEST(test_holes());
+ CALL_SUBTEST(test_full_redux());
CALL_SUBTEST(test_expr());
+ CALL_SUBTEST(test_out_of_order_contraction());
+ CALL_SUBTEST(test_consistency());
+ CALL_SUBTEST(test_large_contraction());
}
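test_large_contraction() checks that a contraction over two index pairs matches a single matrix product on flattened (column-major) views of the operands. The sketch below illustrates the same equivalence at smaller sizes; the shapes are illustrative stand-ins, and the std::pair-based contraction dims mirror the ones used in test_contraction() in cxx11_tensor_device.cpp:

// Hedged sketch: contracting the trailing dimension of t_left with the leading
// dimension of t_right is one matrix multiplication on the flat data.
#include <unsupported/Eigen/CXX11/Tensor>
#include <Eigen/Core>
#include <cassert>
#include <cmath>
#include <utility>

int main() {
  Eigen::Tensor<float, 3> t_left(4, 5, 6);
  Eigen::Tensor<float, 2> t_right(6, 3);
  t_left.setRandom();
  t_right.setRandom();

  // Contract dimension 2 of t_left (size 6) with dimension 0 of t_right.
  Eigen::array<std::pair<int, int>, 1> dims;
  dims[0] = std::make_pair(2, 0);
  Eigen::Tensor<float, 3> t_result = t_left.contract(t_right, dims);  // 4 x 5 x 3

  // The equivalent single matrix multiplication on column-major flat views.
  Eigen::Map<Eigen::MatrixXf> m_left(t_left.data(), 4 * 5, 6);
  Eigen::Map<Eigen::MatrixXf> m_right(t_right.data(), 6, 3);
  Eigen::MatrixXf m_result = m_left * m_right;                        // 20 x 3

  for (int i = 0; i < 20 * 3; ++i) {
    assert(std::fabs(t_result.data()[i] - m_result.data()[i]) < 1e-4f);
  }
  return 0;
}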
diff --git a/unsupported/test/cxx11_tensor_device.cpp b/unsupported/test/cxx11_tensor_device.cpp
index caf2e9735..f331cb481 100644
--- a/unsupported/test/cxx11_tensor_device.cpp
+++ b/unsupported/test/cxx11_tensor_device.cpp
@@ -22,17 +22,43 @@ using Eigen::RowMajor;
// Context for evaluation on cpu
struct CPUContext {
- CPUContext(const Eigen::Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out) : in1_(in1), in2_(in2), out_(out) { }
+ CPUContext(const Eigen::Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out) : in1_(in1), in2_(in2), out_(out), kernel_1d_(2), kernel_2d_(Eigen::array<int, 2>(2,2)), kernel_3d_(Eigen::array<int, 3>(2,2,2)) {
+ kernel_1d_(0) = 3.14f;
+ kernel_1d_(1) = 2.7f;
+
+ kernel_2d_(Eigen::array<int, 2>(0,0)) = 3.14f;
+ kernel_2d_(Eigen::array<int, 2>(1,0)) = 2.7f;
+ kernel_2d_(Eigen::array<int, 2>(0,1)) = 0.2f;
+ kernel_2d_(Eigen::array<int, 2>(1,1)) = 7.0f;
+
+ kernel_3d_(Eigen::array<int, 3>(0,0,0)) = 3.14f;
+ kernel_3d_(Eigen::array<int, 3>(0,1,0)) = 2.7f;
+ kernel_3d_(Eigen::array<int, 3>(0,0,1)) = 0.2f;
+ kernel_3d_(Eigen::array<int, 3>(0,1,1)) = 7.0f;
+ kernel_3d_(Eigen::array<int, 3>(1,0,0)) = -1.0f;
+ kernel_3d_(Eigen::array<int, 3>(1,1,0)) = -0.3f;
+ kernel_3d_(Eigen::array<int, 3>(1,0,1)) = -0.7f;
+ kernel_3d_(Eigen::array<int, 3>(1,1,1)) = -0.5f;
+ }
+
+ const Eigen::DefaultDevice& device() const { return cpu_device_; }
const Eigen::Tensor<float, 3>& in1() const { return in1_; }
const Eigen::Tensor<float, 3>& in2() const { return in2_; }
- Eigen::TensorDevice<Eigen::Tensor<float, 3>, Eigen::DefaultDevice> out() { return TensorDevice<Eigen::Tensor<float, 3>, Eigen::DefaultDevice>(cpu_device_, out_); }
+ Eigen::Tensor<float, 3>& out() { return out_; }
+ const Eigen::Tensor<float, 1>& kernel1d() const { return kernel_1d_; }
+ const Eigen::Tensor<float, 2>& kernel2d() const { return kernel_2d_; }
+ const Eigen::Tensor<float, 3>& kernel3d() const { return kernel_3d_; }
private:
const Eigen::Tensor<float, 3>& in1_;
const Eigen::Tensor<float, 3>& in2_;
Eigen::Tensor<float, 3>& out_;
+ Eigen::Tensor<float, 1> kernel_1d_;
+ Eigen::Tensor<float, 2> kernel_2d_;
+ Eigen::Tensor<float, 3> kernel_3d_;
+
Eigen::DefaultDevice cpu_device_;
};
@@ -40,19 +66,45 @@ struct CPUContext {
// Context for evaluation on GPU
struct GPUContext {
GPUContext(const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1, Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out) : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) {
- cudaStreamCreate(&stream_);
+ assert(cudaMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == cudaSuccess);
+ float kernel_1d_val[] = {3.14f, 2.7f};
+ assert(cudaMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+
+ assert(cudaMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == cudaSuccess);
+ float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f};
+ assert(cudaMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+
+ assert(cudaMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == cudaSuccess);
+ float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f};
+ assert(cudaMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+
+ assert(cudaStreamCreate(&stream_) == cudaSuccess);
}
~GPUContext() {
- cudaStreamDestroy(stream_);
+ assert(cudaFree(kernel_1d_) == cudaSuccess);
+ assert(cudaFree(kernel_2d_) == cudaSuccess);
+ assert(cudaFree(kernel_3d_) == cudaSuccess);
+ assert(cudaStreamDestroy(stream_) == cudaSuccess);
}
+
+ const Eigen::GpuDevice& device() const { return gpu_device_; }
+
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1() const { return in1_; }
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; }
- Eigen::TensorDevice<Eigen::TensorMap<Eigen::Tensor<float, 3> >, Eigen::GpuDevice> out() { return TensorDevice<Eigen::TensorMap<Eigen::Tensor<float, 3> >, Eigen::GpuDevice>(gpu_device_, out_); }
+ Eigen::TensorMap<Eigen::Tensor<float, 3> >& out() { return out_; }
+ Eigen::TensorMap<Eigen::Tensor<float, 1> > kernel1d() const { return Eigen::TensorMap<Eigen::Tensor<float, 1> >(kernel_1d_, 2); }
+ Eigen::TensorMap<Eigen::Tensor<float, 2> > kernel2d() const { return Eigen::TensorMap<Eigen::Tensor<float, 2> >(kernel_2d_, Eigen::array<int, 2>(2, 2)); }
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > kernel3d() const { return Eigen::TensorMap<Eigen::Tensor<float, 3> >(kernel_3d_, Eigen::array<int, 3>(2, 2, 2)); }
private:
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1_;
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2_;
Eigen::TensorMap<Eigen::Tensor<float, 3> >& out_;
+
+ float* kernel_1d_;
+ float* kernel_2d_;
+ float* kernel_3d_;
+
cudaStream_t stream_;
Eigen::GpuDevice gpu_device_;
};
@@ -62,49 +114,151 @@ struct GPUContext {
template <typename Context>
static void test_contextual_eval(Context* context)
{
- context->out() = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f);
+ context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f);
}
template <typename Context>
static void test_forced_contextual_eval(Context* context)
{
- context->out() = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f);
+ context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f);
+}
+
+template <typename Context>
+static void test_contraction(Context* context)
+{
+ Eigen::array<std::pair<int, int>, 2> dims;
+ dims[0] = std::make_pair(1, 1);
+ dims[1] = std::make_pair(2, 2);
+
+ Eigen::array<int, 2> shape(40, 50*70);
+
+ Eigen::DSizes<int, 2> indices(0,0);
+ Eigen::DSizes<int, 2> sizes(40,40);
+
+ context->out().reshape(shape).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims);
+}
+
+
+template <typename Context>
+static void test_1d_convolution(Context* context)
+{
+ Eigen::DSizes<int, 3> indices(Eigen::array<int, 3>(0,0,0));
+ Eigen::DSizes<int, 3> sizes(Eigen::array<int, 3>(40,49,70));
+
+ Eigen::array<int, 1> dims(1);
+ context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel1d(), dims);
+}
+
+template <typename Context>
+static void test_2d_convolution(Context* context)
+{
+ Eigen::DSizes<int, 3> indices(Eigen::array<int, 3>(0,0,0));
+ Eigen::DSizes<int, 3> sizes(Eigen::array<int, 3>(40,49,69));
+
+ Eigen::array<int, 2> dims(1,2);
+ context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel2d(), dims);
+}
+
+template <typename Context>
+static void test_3d_convolution(Context* context)
+{
+ Eigen::DSizes<int, 3> indices(Eigen::array<int, 3>(0,0,0));
+ Eigen::DSizes<int, 3> sizes(Eigen::array<int, 3>(39,49,69));
+
+ Eigen::array<int, 3> dims(0,1,2);
+ context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel3d(), dims);
}
+
static void test_cpu() {
- Eigen::Tensor<float, 3> in1(Eigen::array<int, 3>(2,3,7));
- Eigen::Tensor<float, 3> in2(Eigen::array<int, 3>(2,3,7));
- Eigen::Tensor<float, 3> out(Eigen::array<int, 3>(2,3,7));
+ Eigen::Tensor<float, 3> in1(Eigen::array<int, 3>(40,50,70));
+ Eigen::Tensor<float, 3> in2(Eigen::array<int, 3>(40,50,70));
+ Eigen::Tensor<float, 3> out(Eigen::array<int, 3>(40,50,70));
- in1.setRandom();
- in2.setRandom();
+ in1 = in1.random() + in1.constant(10.0f);
+ in2 = in2.random() + in2.constant(10.0f);
CPUContext context(in1, in2, out);
test_contextual_eval(&context);
- for (int i = 0; i < 2; ++i) {
- for (int j = 0; j < 3; ++j) {
- for (int k = 0; k < 7; ++k) {
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(Eigen::array<int, 3>(i,j,k)), in1(Eigen::array<int, 3>(i,j,k)) + in2(Eigen::array<int, 3>(i,j,k)) * 3.14f + 2.718f);
}
}
}
test_forced_contextual_eval(&context);
- for (int i = 0; i < 2; ++i) {
- for (int j = 0; j < 3; ++j) {
- for (int k = 0; k < 7; ++k) {
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(Eigen::array<int, 3>(i,j,k)), (in1(Eigen::array<int, 3>(i,j,k)) + in2(Eigen::array<int, 3>(i,j,k))) * 3.14f + 2.718f);
}
}
}
+
+ test_contraction(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 40; ++j) {
+ const float result = out(Eigen::array<int, 3>(i,j,0));
+ float expected = 0;
+ for (int k = 0; k < 50; ++k) {
+ for (int l = 0; l < 70; ++l) {
+ expected += in1(Eigen::array<int, 3>(i, k, l)) * in2(Eigen::array<int, 3>(j, k, l));
+ }
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+
+ test_1d_convolution(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(Eigen::array<int, 3>(i,j,k)), (in1(Eigen::array<int, 3>(i,j,k)) * 3.14f + in1(Eigen::array<int, 3>(i,j+1,k)) * 2.7f));
+ }
+ }
+ }
+
+ test_2d_convolution(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(Eigen::array<int, 3>(i,j,k));
+ const float expected = (in1(Eigen::array<int, 3>(i,j,k)) * 3.14f + in1(Eigen::array<int, 3>(i,j+1,k)) * 2.7f) +
+ (in1(Eigen::array<int, 3>(i,j,k+1)) * 0.2f + in1(Eigen::array<int, 3>(i,j+1,k+1)) * 7.0f);
+ if (fabs(expected) < 1e-4 && fabs(result) < 1e-4) {
+ continue;
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
+
+ test_3d_convolution(&context);
+ for (int i = 0; i < 39; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(Eigen::array<int, 3>(i,j,k));
+ const float expected = (in1(Eigen::array<int, 3>(i,j,k)) * 3.14f + in1(Eigen::array<int, 3>(i,j+1,k)) * 2.7f +
+ in1(Eigen::array<int, 3>(i,j,k+1)) * 0.2f + in1(Eigen::array<int, 3>(i,j+1,k+1)) * 7.0f) +
+ (in1(Eigen::array<int, 3>(i+1,j,k)) * -1.0f + in1(Eigen::array<int, 3>(i+1,j+1,k)) * -0.3f +
+ in1(Eigen::array<int, 3>(i+1,j,k+1)) * -0.7f + in1(Eigen::array<int, 3>(i+1,j+1,k+1)) * -0.5f);
+ if (fabs(expected) < 1e-4 && fabs(result) < 1e-4) {
+ continue;
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
}
static void test_gpu() {
- Eigen::Tensor<float, 3> in1(Eigen::array<int, 3>(2,3,7));
- Eigen::Tensor<float, 3> in2(Eigen::array<int, 3>(2,3,7));
- Eigen::Tensor<float, 3> out(Eigen::array<int, 3>(2,3,7));
- in1.setRandom();
- in2.setRandom();
+ Eigen::Tensor<float, 3> in1(Eigen::array<int, 3>(40,50,70));
+ Eigen::Tensor<float, 3> in2(Eigen::array<int, 3>(40,50,70));
+ Eigen::Tensor<float, 3> out(Eigen::array<int, 3>(40,50,70));
+ in1 = in1.random() + in1.constant(10.0f);
+ in2 = in2.random() + in2.constant(10.0f);
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t in2_bytes = in2.size() * sizeof(float);
@@ -120,32 +274,87 @@ static void test_gpu() {
cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
- Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<int, 3>(2,3,7));
- Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, Eigen::array<int, 3>(2,3,7));
- Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, Eigen::array<int, 3>(2,3,7));
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<int, 3>(40,50,70));
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, Eigen::array<int, 3>(40,50,70));
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, Eigen::array<int, 3>(40,50,70));
GPUContext context(gpu_in1, gpu_in2, gpu_out);
test_contextual_eval(&context);
- cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost);
- for (int i = 0; i < 2; ++i) {
- for (int j = 0; j < 3; ++j) {
- for (int k = 0; k < 7; ++k) {
+ assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(Eigen::array<int, 3>(i,j,k)), in1(Eigen::array<int, 3>(i,j,k)) + in2(Eigen::array<int, 3>(i,j,k)) * 3.14f + 2.718f);
}
}
}
test_forced_contextual_eval(&context);
- cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost);
- for (int i = 0; i < 2; ++i) {
- for (int j = 0; j < 3; ++j) {
- for (int k = 0; k < 7; ++k) {
+ assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(Eigen::array<int, 3>(i,j,k)), (in1(Eigen::array<int, 3>(i,j,k)) + in2(Eigen::array<int, 3>(i,j,k))) * 3.14f + 2.718f);
}
}
}
-}
+ test_contraction(&context);
+ assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 40; ++j) {
+ const float result = out(Eigen::array<int, 3>(i,j,0));
+ float expected = 0;
+ for (int k = 0; k < 50; ++k) {
+ for (int l = 0; l < 70; ++l) {
+ expected += in1(Eigen::array<int, 3>(i, k, l)) * in2(Eigen::array<int, 3>(j, k, l));
+ }
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+
+ test_1d_convolution(&context);
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(Eigen::array<int, 3>(i,j,k)), (in1(Eigen::array<int, 3>(i,j,k)) * 3.14f + in1(Eigen::array<int, 3>(i,j+1,k)) * 2.7f));
+ }
+ }
+ }
+
+ test_2d_convolution(&context);
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(Eigen::array<int, 3>(i,j,k));
+ const float expected = (in1(Eigen::array<int, 3>(i,j,k)) * 3.14f + in1(Eigen::array<int, 3>(i,j+1,k)) * 2.7f +
+ in1(Eigen::array<int, 3>(i,j,k+1)) * 0.2f + in1(Eigen::array<int, 3>(i,j+1,k+1)) * 7.0f);
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
+
+ test_3d_convolution(&context);
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ for (int i = 0; i < 39; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(Eigen::array<int, 3>(i,j,k));
+ const float expected = (in1(Eigen::array<int, 3>(i,j,k)) * 3.14f + in1(Eigen::array<int, 3>(i,j+1,k)) * 2.7f +
+ in1(Eigen::array<int, 3>(i,j,k+1)) * 0.2f + in1(Eigen::array<int, 3>(i,j+1,k+1)) * 7.0f +
+ in1(Eigen::array<int, 3>(i+1,j,k)) * -1.0f + in1(Eigen::array<int, 3>(i+1,j+1,k)) * -0.3f +
+ in1(Eigen::array<int, 3>(i+1,j,k+1)) * -0.7f + in1(Eigen::array<int, 3>(i+1,j+1,k+1)) * -0.5f);
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
+}
void test_cxx11_tensor_device()
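The device test now writes results through out().device(device()) instead of having out() return a TensorDevice wrapper, so the same templated test body works for both CPUContext and GPUContext. A minimal CPU-only sketch of that assignment pattern:

// Hedged sketch of the .device() assignment pattern used throughout the
// rewritten test, evaluated here on the default (CPU) device only.
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 3> in1(2, 3, 7);
  Eigen::Tensor<float, 3> in2(2, 3, 7);
  Eigen::Tensor<float, 3> out(2, 3, 7);
  in1.setRandom();
  in2.setRandom();

  Eigen::DefaultDevice cpu_device;

  // The expression is evaluated on whichever device is passed to .device();
  // with an Eigen::GpuDevice the same statement would run on the GPU stream.
  out.device(cpu_device) = in1 + in2 * 3.14f + in1.constant(2.718f);

  return 0;
}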
diff --git a/unsupported/test/cxx11_tensor_shuffling.cpp b/unsupported/test/cxx11_tensor_shuffling.cpp
index 92dd01a52..5ab8b6821 100644
--- a/unsupported/test/cxx11_tensor_shuffling.cpp
+++ b/unsupported/test/cxx11_tensor_shuffling.cpp
@@ -106,11 +106,58 @@ static void test_expr_shuffling()
}
}
}
+
+ dst_slice_start[0] = 0;
+ result.setRandom();
+ for (int i = 0; i < 5; ++i) {
+ result.slice(dst_slice_start, dst_slice_dim) =
+ tensor.shuffle(shuffles).slice(dst_slice_start, dst_slice_dim);
+ dst_slice_start[0] += 1;
+ }
+
+ for (int i = 0; i < expected.dimension(0); ++i) {
+ for (int j = 0; j < expected.dimension(1); ++j) {
+ for (int k = 0; k < expected.dimension(2); ++k) {
+ for (int l = 0; l < expected.dimension(3); ++l) {
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
+ }
+ }
+ }
+ }
}
+static void test_shuffling_as_value()
+{
+ Tensor<float, 4> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> shuffles;
+ shuffles[2] = 0;
+ shuffles[3] = 1;
+ shuffles[1] = 2;
+ shuffles[0] = 3;
+ Tensor<float, 4> shuffle(5,7,3,2);
+ shuffle.shuffle(shuffles) = tensor;
+
+ VERIFY_IS_EQUAL(shuffle.dimension(0), 5);
+ VERIFY_IS_EQUAL(shuffle.dimension(1), 7);
+ VERIFY_IS_EQUAL(shuffle.dimension(2), 3);
+ VERIFY_IS_EQUAL(shuffle.dimension(3), 2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,l,j,i));
+ }
+ }
+ }
+ }
+}
+
void test_cxx11_tensor_shuffling()
{
CALL_SUBTEST(test_simple_shuffling());
CALL_SUBTEST(test_expr_shuffling());
+ CALL_SUBTEST(test_shuffling_as_value());
}
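test_shuffling_as_value() relies on shuffle() being usable as an lvalue: assigning to a shuffled view scatters the right-hand side into the destination according to the permutation. A small 2-D sketch of the same idea:

// Hedged sketch: shuffle() on the left-hand side writes the rhs into the
// destination with permuted indices.
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<float, 2> src(2, 3);
  src.setRandom();

  // Permutation {1, 0}: dimension d of the lhs view is dimension shuffles[d] of dst.
  Eigen::array<ptrdiff_t, 2> shuffles;
  shuffles[0] = 1;
  shuffles[1] = 0;

  Eigen::Tensor<float, 2> dst(3, 2);   // transposed shape
  dst.shuffle(shuffles) = src;         // shuffle used as an lvalue

  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      assert(src(i, j) == dst(j, i));  // exact copy, so exact comparison is fine
    }
  }
  return 0;
}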
diff --git a/unsupported/test/cxx11_tensor_simple.cpp b/unsupported/test/cxx11_tensor_simple.cpp
index 1455f2a4c..a70591c82 100644
--- a/unsupported/test/cxx11_tensor_simple.cpp
+++ b/unsupported/test/cxx11_tensor_simple.cpp
@@ -257,12 +257,38 @@ static void test_simple_assign()
VERIFY_IS_EQUAL((e2(1,0,2)), -1);
}
+static void test_resize()
+{
+ Tensor<int, 3> epsilon;
+ epsilon.resize(2,3,7);
+ VERIFY_IS_EQUAL(epsilon.dimension(0), 2);
+ VERIFY_IS_EQUAL(epsilon.dimension(1), 3);
+ VERIFY_IS_EQUAL(epsilon.dimension(2), 7);
+ VERIFY_IS_EQUAL(epsilon.dimensions().TotalSize(), 2ul*3*7);
+
+ const int* old_data = epsilon.data();
+ epsilon.resize(3,2,7);
+ VERIFY_IS_EQUAL(epsilon.dimension(0), 3);
+ VERIFY_IS_EQUAL(epsilon.dimension(1), 2);
+ VERIFY_IS_EQUAL(epsilon.dimension(2), 7);
+ VERIFY_IS_EQUAL(epsilon.dimensions().TotalSize(), 2ul*3*7);
+ VERIFY_IS_EQUAL(epsilon.data(), old_data);
+
+ epsilon.resize(3,5,7);
+ VERIFY_IS_EQUAL(epsilon.dimension(0), 3);
+ VERIFY_IS_EQUAL(epsilon.dimension(1), 5);
+ VERIFY_IS_EQUAL(epsilon.dimension(2), 7);
+ VERIFY_IS_EQUAL(epsilon.dimensions().TotalSize(), 3ul*5*7);
+ VERIFY_IS_NOT_EQUAL(epsilon.data(), old_data);
+}
+
void test_cxx11_tensor_simple()
{
CALL_SUBTEST(test_1d());
CALL_SUBTEST(test_2d());
CALL_SUBTEST(test_3d());
CALL_SUBTEST(test_simple_assign());
+ CALL_SUBTEST(test_resize());
}
/*
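test_resize() pins down two properties of Tensor::resize(): the allocation is reused when the total coefficient count is unchanged, and a growing resize hands back a different data() pointer. The sketch below mirrors those assertions; the pointer-inequality check assumes, as the test itself does, that the reallocation lands at a new address:

// Hedged sketch of the resize behaviour checked by test_resize().
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<int, 3> t;
  t.resize(2, 3, 7);                   // 42 coefficients
  const int* before = t.data();

  t.resize(3, 2, 7);                   // still 42 coefficients
  assert(t.data() == before);          // same buffer, only the shape changes

  t.resize(3, 5, 7);                   // 105 coefficients -> must grow
  assert(t.data() != before);          // a new buffer was allocated
  return 0;
}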