author    Eugene Zhulenev <ezhulenev@google.com>    2019-12-10 11:58:30 -0800
committer Eugene Zhulenev <ezhulenev@google.com>    2019-12-10 14:31:44 -0800
commit    dbca11e8805ec07660d8f966a1884ad0be302f15 (patch)
tree      9da1438132a9a40de7ca3abafec2e559eb0449e3 /unsupported/test
parent    c49f0d851ab77c9e4d782b453b4b0428bce903d3 (diff)
Remove TensorBlock.h and old TensorBlock/BlockMapper
Diffstat (limited to 'unsupported/test')
-rw-r--r--  unsupported/test/cxx11_tensor_block_access.cpp | 404
-rw-r--r--  unsupported/test/cxx11_tensor_block_eval.cpp   |  17
-rw-r--r--  unsupported/test/cxx11_tensor_block_io.cpp     |  61
3 files changed, 242 insertions, 240 deletions
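
To orient the reader: the removed `TensorBlock`/`TensorBlockMapper` carried a scalar type, an index type, strides, and a data pointer, while the V2 replacement is a `TensorBlockV2Mapper<NumDims, Layout>` that hands out lightweight `TensorBlockDescriptor`s (offset + dimensions). A minimal before/after sketch, using only names that appear in the hunks below; everything here is Eigen-internal, so treat it as illustration rather than a stable API:

```cpp
// Sketch of the API migration exercised by these tests. Assumes the Eigen
// unsupported Tensor module is on the include path.
#include <unsupported/Eigen/CXX11/Tensor>

using Eigen::ColMajor;
using Eigen::DSizes;
using Eigen::Index;

void migration_sketch() {
  DSizes<Index, 2> dims(100, 100);

  // Old (removed with TensorBlock.h):
  //   internal::TensorBlockMapper<int, Index, 2, ColMajor> mapper(
  //       dims, internal::kUniformAllDims, /*max_coeff_count=*/100);
  //   auto block = mapper.GetBlockForIndex(0, /*data=*/NULL);
  //   block.block_sizes(); block.first_coeff_index();

  // New: the scalar/index template parameters are gone, the shape enum is
  // scoped, and shape + target size are passed as one brace-initialized
  // requirements argument.
  Eigen::internal::TensorBlockV2Mapper<2, ColMajor> mapper(
      dims,
      {Eigen::internal::TensorBlockV2ShapeType::kUniformAllDims, 100});

  for (Index i = 0; i < mapper.blockCount(); ++i) {
    auto desc = mapper.blockDescriptor(i);  // lightweight descriptor
    Index first = desc.offset();            // was block.first_coeff_index()
    auto sizes = desc.dimensions();         // was block.block_sizes()
    (void)first;
    (void)sizes;
  }
}
```

The brace-initialized second argument bundles the block shape and target size that the old constructor took as two separate parameters.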
diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp
index 8d3ca84c8..b56601ebd 100644
--- a/unsupported/test/cxx11_tensor_block_access.cpp
+++ b/unsupported/test/cxx11_tensor_block_access.cpp
@@ -19,6 +19,7 @@ using Eigen::Tensor;
using Eigen::Index;
using Eigen::RowMajor;
using Eigen::ColMajor;
+using Eigen::internal::TensorBlockV2ShapeType;
template<typename T>
@@ -26,15 +27,15 @@ static const T& choose(int layout, const T& col, const T& row) {
return layout == ColMajor ? col : row;
}
-static internal::TensorBlockShapeType RandomShape() {
+static TensorBlockV2ShapeType RandomShape() {
return internal::random<bool>()
- ? internal::kUniformAllDims
- : internal::kSkewedInnerDims;
+ ? TensorBlockV2ShapeType::kUniformAllDims
+ : TensorBlockV2ShapeType::kSkewedInnerDims;
}
template <int NumDims>
-static Index RandomTargetSize(const DSizes<Index, NumDims>& dims) {
- return internal::random<Index>(1, dims.TotalSize());
+static size_t RandomTargetSize(const DSizes<Index, NumDims>& dims) {
+ return internal::random<size_t>(1, dims.TotalSize());
}
template <int NumDims>
@@ -66,55 +67,43 @@ static void Debug(DSizes<Index, NumDims> dims) {
template <int Layout>
static void test_block_mapper_sanity()
{
- typedef internal::TensorBlockMapper<int, Index, 2, Layout> TensorBlockMapper;
+ typedef internal::TensorBlockV2Mapper<2, Layout> TensorBlockMapper;
DSizes<Index, 2> tensor_dims(100, 100);
// Test uniform blocks.
TensorBlockMapper uniform_block_mapper(
- tensor_dims, internal::kUniformAllDims, 100);
+ tensor_dims, {TensorBlockV2ShapeType::kUniformAllDims, 100});
- VERIFY_IS_EQUAL(uniform_block_mapper.total_block_count(), 100);
- VERIFY_IS_EQUAL(uniform_block_mapper.block_dims_total_size(), 100);
+ VERIFY_IS_EQUAL(uniform_block_mapper.blockCount(), 100);
+ VERIFY_IS_EQUAL(uniform_block_mapper.blockTotalSize(), 100);
// 10x10 blocks
- typename TensorBlockMapper::Block uniform_b0 = uniform_block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(uniform_b0.block_sizes().at(0), 10);
- VERIFY_IS_EQUAL(uniform_b0.block_sizes().at(1), 10);
- // Depending on a layout we stride by cols rows.
- VERIFY_IS_EQUAL(uniform_b0.block_strides().at(0), choose(Layout, 1, 10));
- VERIFY_IS_EQUAL(uniform_b0.block_strides().at(1), choose(Layout, 10, 1));
- // Tensor strides depend only on a layout and not on the block size.
- VERIFY_IS_EQUAL(uniform_b0.tensor_strides().at(0), choose(Layout, 1, 100));
- VERIFY_IS_EQUAL(uniform_b0.tensor_strides().at(1), choose(Layout, 100, 1));
+ auto uniform_b0 = uniform_block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(uniform_b0.dimensions().at(0), 10);
+ VERIFY_IS_EQUAL(uniform_b0.dimensions().at(1), 10);
// Test skewed to inner dims blocks.
TensorBlockMapper skewed_block_mapper(
- tensor_dims, internal::kSkewedInnerDims, 100);
+ tensor_dims, {TensorBlockV2ShapeType::kSkewedInnerDims, 100});
- VERIFY_IS_EQUAL(skewed_block_mapper.total_block_count(), 100);
- VERIFY_IS_EQUAL(skewed_block_mapper.block_dims_total_size(), 100);
+ VERIFY_IS_EQUAL(skewed_block_mapper.blockCount(), 100);
+ VERIFY_IS_EQUAL(skewed_block_mapper.blockTotalSize(), 100);
  // 1x100 (100x1) rows/cols depending on the tensor layout.
- typename TensorBlockMapper::Block skewed_b0 = skewed_block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(skewed_b0.block_sizes().at(0), choose(Layout, 100, 1));
- VERIFY_IS_EQUAL(skewed_b0.block_sizes().at(1), choose(Layout, 1, 100));
- // Depending on a layout we stride by cols rows.
- VERIFY_IS_EQUAL(skewed_b0.block_strides().at(0), choose(Layout, 1, 100));
- VERIFY_IS_EQUAL(skewed_b0.block_strides().at(1), choose(Layout, 100, 1));
- // Tensor strides depend only on a layout and not on the block size.
- VERIFY_IS_EQUAL(skewed_b0.tensor_strides().at(0), choose(Layout, 1, 100));
- VERIFY_IS_EQUAL(skewed_b0.tensor_strides().at(1), choose(Layout, 100, 1));
+ auto skewed_b0 = skewed_block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(skewed_b0.dimensions().at(0), choose(Layout, 100, 1));
+ VERIFY_IS_EQUAL(skewed_b0.dimensions().at(1), choose(Layout, 1, 100));
}
// Given a TensorBlock, "visit" every element accessible through it, and keep
// an index in the visited set. Verify that every coeff is accessed only once.
-template <typename T, int Layout, int NumDims>
+template<int NumDims, int Layout>
static void UpdateCoeffSet(
- const internal::TensorBlock<T, Index, NumDims, Layout>& block,
+ const DSizes<Index, NumDims>& tensor_strides,
+ const internal::TensorBlockDescriptor<NumDims>& block,
Index first_coeff_index, int dim_index, std::set<Index>* visited_coeffs) {
- const DSizes<Index, NumDims>& block_sizes = block.block_sizes();
- const DSizes<Index, NumDims>& tensor_strides = block.tensor_strides();
+ const DSizes<Index, NumDims>& block_sizes = block.dimensions();
for (int i = 0; i < block_sizes[dim_index]; ++i) {
if (tensor_strides[dim_index] == 1) {
@@ -123,7 +112,7 @@ static void UpdateCoeffSet(
VERIFY_IS_EQUAL(inserted.second, true);
} else {
int next_dim_index = dim_index + choose(Layout, -1, 1);
- UpdateCoeffSet<T, Layout, NumDims>(block, first_coeff_index,
+ UpdateCoeffSet<NumDims, Layout>(tensor_strides, block, first_coeff_index,
next_dim_index, visited_coeffs);
first_coeff_index += tensor_strides[dim_index];
}
@@ -132,22 +121,22 @@ static void UpdateCoeffSet(
template <typename T, int NumDims, int Layout>
static void test_block_mapper_maps_every_element() {
- typedef internal::TensorBlock<T, Index, NumDims, Layout> TensorBlock;
- typedef internal::TensorBlockMapper<T, Index, NumDims, Layout> TensorBlockMapper;
+ typedef internal::TensorBlockV2Mapper<NumDims, Layout> TensorBlockMapper;
DSizes<Index, NumDims> dims = RandomDims<NumDims>();
+ DSizes<Index, NumDims> strides = internal::strides<Layout>(dims);
  // Keep track of element indices available via block access.
std::set<Index> coeff_set;
// Try different combinations of block types and sizes.
- TensorBlockMapper block_mapper(dims, RandomShape(), RandomTargetSize(dims));
+ TensorBlockMapper block_mapper(dims, {RandomShape(), RandomTargetSize(dims)});
- for (int i = 0; i < block_mapper.total_block_count(); ++i) {
- TensorBlock block = block_mapper.GetBlockForIndex(i, NULL);
- UpdateCoeffSet<T, Layout, NumDims>(block, block.first_coeff_index(),
- choose(Layout, NumDims - 1, 0),
- &coeff_set);
+ for (int i = 0; i < block_mapper.blockCount(); ++i) {
+ auto block = block_mapper.blockDescriptor(i);
+ UpdateCoeffSet<NumDims, Layout>(strides, block, block.offset(),
+ choose(Layout, NumDims - 1, 0),
+ &coeff_set);
}
// Verify that every coefficient in the original Tensor is accessible through
@@ -237,20 +226,21 @@ public:
template <int Layout>
static void test_uniform_block_shape()
{
- typedef internal::TensorBlock<int, Index, 5, Layout> TensorBlock;
- typedef internal::TensorBlockMapper<int, Index, 5, Layout> TensorBlockMapper;
+ typedef internal::TensorBlockDescriptor<5> TensorBlock;
+ typedef internal::TensorBlockV2Mapper<5, Layout> TensorBlockMapper;
{
    // Test shape 'UniformAllDims' with uniform 'max_coeff_count'.
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 5 * 5 * 5 * 5 * 5;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
for (int i = 0; i < 5; ++i) {
- VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
  // Test shape 'UniformAllDims' with larger 'max_coeff_count' which spills
@@ -258,25 +248,27 @@ static void test_uniform_block_shape()
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 7 * 5 * 5 * 5 * 5;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[0]);
for (int i = 1; i < 5; ++i) {
- VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 5 * 5 * 5 * 5 * 6;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(6, block.block_sizes()[4]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(6, block.dimensions()[4]);
for (int i = 3; i >= 0; --i) {
- VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
  // Test shape 'UniformAllDims' with larger 'max_coeff_count' which spills
@@ -284,25 +276,27 @@ static void test_uniform_block_shape()
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 11 * 5 * 5 * 5 * 5;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(11, block.dimensions()[0]);
for (int i = 1; i < 5; ++i) {
- VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 5 * 5 * 5 * 5 * 7;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
for (int i = 3; i >= 0; --i) {
- VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
  // Test shape 'UniformAllDims' with larger 'max_coeff_count' which spills
@@ -310,111 +304,119 @@ static void test_uniform_block_shape()
if (Layout == ColMajor) {
DSizes<Index, 5> dims(7, 5, 6, 17, 7);
const Index max_coeff_count = 7 * 5 * 6 * 7 * 5;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
- VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
- VERIFY_IS_EQUAL(7, block.block_sizes()[3]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[4]);
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[0]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[1]);
+ VERIFY_IS_EQUAL(6, block.dimensions()[2]);
+ VERIFY_IS_EQUAL(7, block.dimensions()[3]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[4]);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(7, 5, 6, 9, 7);
const Index max_coeff_count = 5 * 5 * 5 * 6 * 7;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
- VERIFY_IS_EQUAL(6, block.block_sizes()[3]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[2]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[0]);
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
+ VERIFY_IS_EQUAL(6, block.dimensions()[3]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[2]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[1]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[0]);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
// Test shape 'UniformAllDims' with full allocation to all dims.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(7, 5, 6, 17, 7);
const Index max_coeff_count = 7 * 5 * 6 * 17 * 7;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
- VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
- VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[0]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[1]);
+ VERIFY_IS_EQUAL(6, block.dimensions()[2]);
+ VERIFY_IS_EQUAL(17, block.dimensions()[3]);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(7, 5, 6, 9, 7);
const Index max_coeff_count = 7 * 5 * 6 * 9 * 7;
- TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
- VERIFY_IS_EQUAL(9, block.block_sizes()[3]);
- VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
- VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kUniformAllDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
+ VERIFY_IS_EQUAL(9, block.dimensions()[3]);
+ VERIFY_IS_EQUAL(6, block.dimensions()[2]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[1]);
+ VERIFY_IS_EQUAL(7, block.dimensions()[0]);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
}
template <int Layout>
static void test_skewed_inner_dim_block_shape()
{
- typedef internal::TensorBlock<int, Index, 5, Layout> TensorBlock;
- typedef internal::TensorBlockMapper<int, Index, 5, Layout> TensorBlockMapper;
+ typedef internal::TensorBlockDescriptor<5> TensorBlock;
+ typedef internal::TensorBlockV2Mapper<5, Layout> TensorBlockMapper;
// Test shape 'SkewedInnerDims' with partial allocation to inner-most dim.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 10 * 1 * 1 * 1 * 1;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(10, block.block_sizes()[0]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(10, block.dimensions()[0]);
for (int i = 1; i < 5; ++i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 1 * 1 * 1 * 1 * 6;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(6, block.block_sizes()[4]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(6, block.dimensions()[4]);
for (int i = 3; i >= 0; --i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
// Test shape 'SkewedInnerDims' with full allocation to inner-most dim.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 11 * 1 * 1 * 1 * 1;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(11, block.dimensions()[0]);
for (int i = 1; i < 5; ++i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 1 * 1 * 1 * 1 * 7;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
for (int i = 3; i >= 0; --i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
// Test shape 'SkewedInnerDims' with full allocation to inner-most dim,
@@ -422,27 +424,29 @@ static void test_skewed_inner_dim_block_shape()
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 11 * 3 * 1 * 1 * 1;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
- VERIFY_IS_EQUAL(3, block.block_sizes()[1]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(11, block.dimensions()[0]);
+ VERIFY_IS_EQUAL(3, block.dimensions()[1]);
for (int i = 2; i < 5; ++i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 1 * 1 * 1 * 15 * 7;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
- VERIFY_IS_EQUAL(15, block.block_sizes()[3]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
+ VERIFY_IS_EQUAL(15, block.dimensions()[3]);
for (int i = 2; i >= 0; --i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
// Test shape 'SkewedInnerDims' with full allocation to inner-most dim,
@@ -450,61 +454,65 @@ static void test_skewed_inner_dim_block_shape()
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 11 * 5 * 5 * 1 * 1;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[2]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(11, block.dimensions()[0]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[1]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[2]);
for (int i = 3; i < 5; ++i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 1 * 1 * 5 * 17 * 7;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
- VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[2]);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
+ VERIFY_IS_EQUAL(17, block.dimensions()[3]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[2]);
for (int i = 1; i >= 0; --i) {
- VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ VERIFY_IS_EQUAL(1, block.dimensions()[i]);
}
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
// Test shape 'SkewedInnerDims' with full allocation to all dims.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
- VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
- VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(11, block.dimensions()[0]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[1]);
+ VERIFY_IS_EQUAL(6, block.dimensions()[2]);
+ VERIFY_IS_EQUAL(17, block.dimensions()[3]);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
- TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
- max_coeff_count);
- TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
- VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
- VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
- VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
- VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
- VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
- VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ TensorBlockMapper
+ block_mapper(dims, {TensorBlockV2ShapeType::kSkewedInnerDims,
+ max_coeff_count});
+ TensorBlock block = block_mapper.blockDescriptor(0);
+ VERIFY_IS_EQUAL(7, block.dimensions()[4]);
+ VERIFY_IS_EQUAL(17, block.dimensions()[3]);
+ VERIFY_IS_EQUAL(6, block.dimensions()[2]);
+ VERIFY_IS_EQUAL(5, block.dimensions()[1]);
+ VERIFY_IS_EQUAL(11, block.dimensions()[0]);
+ VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
}
}
template <int Layout>
-static void test_empty_dims(const internal::TensorBlockShapeType block_shape)
+static void test_empty_dims(const internal::TensorBlockV2ShapeType block_shape)
{
// Test blocking of tensors with zero dimensions:
// - we must not crash on asserts and divisions by zero
@@ -512,26 +520,28 @@ static void test_empty_dims(const internal::TensorBlockShapeType block_shape)
// (recipe for overflows/underflows, divisions by zero and NaNs later)
// - total block count must be zero
{
- typedef internal::TensorBlockMapper<int, Index, 1, Layout> TensorBlockMapper;
+ typedef internal::TensorBlockV2Mapper<1, Layout> TensorBlockMapper;
+
DSizes<Index, 1> dims(0);
- for (int max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) {
- TensorBlockMapper block_mapper(dims, block_shape, max_coeff_count);
- VERIFY_IS_EQUAL(block_mapper.total_block_count(), 0);
- VERIFY(block_mapper.block_dims_total_size() >= 1);
+ for (size_t max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) {
+ TensorBlockMapper block_mapper(dims, {block_shape, max_coeff_count});
+ VERIFY_IS_EQUAL(block_mapper.blockCount(), 0);
+ VERIFY(block_mapper.blockTotalSize() >= 1);
}
}
{
- typedef internal::TensorBlockMapper<int, Index, 2, Layout> TensorBlockMapper;
+ typedef internal::TensorBlockV2Mapper<2, Layout> TensorBlockMapper;
+
for (int dim1 = 0; dim1 < 3; ++dim1) {
for (int dim2 = 0; dim2 < 3; ++dim2) {
DSizes<Index, 2> dims(dim1, dim2);
- for (int max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) {
- TensorBlockMapper block_mapper(dims, block_shape, max_coeff_count);
+ for (size_t max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) {
+ TensorBlockMapper block_mapper(dims, {block_shape, max_coeff_count});
if (dim1 * dim2 == 0) {
- VERIFY_IS_EQUAL(block_mapper.total_block_count(), 0);
+ VERIFY_IS_EQUAL(block_mapper.blockCount(), 0);
}
- VERIFY(block_mapper.block_dims_total_size() >= 1);
+ VERIFY(block_mapper.blockTotalSize() >= 1);
}
}
}
@@ -563,8 +573,8 @@ EIGEN_DECLARE_TEST(cxx11_tensor_block_access) {
TEST_LAYOUTS_AND_DIMS(float, test_block_mapper_maps_every_element);
TEST_LAYOUTS(test_uniform_block_shape);
TEST_LAYOUTS(test_skewed_inner_dim_block_shape);
- TEST_LAYOUTS_WITH_ARG(test_empty_dims, internal::kUniformAllDims);
- TEST_LAYOUTS_WITH_ARG(test_empty_dims, internal::kSkewedInnerDims);
+ TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockV2ShapeType::kUniformAllDims);
+ TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockV2ShapeType::kSkewedInnerDims);
}
#undef TEST_LAYOUTS
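
One consequence of the slimmer descriptor, visible in `UpdateCoeffSet` above: tensor strides are no longer recoverable from the block itself, so callers compute them once with `internal::strides<Layout>(dims)` and thread them through. A hedged sketch of that idiom (the helper name `visit_all_blocks` is ours, for illustration only):

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

// Strides are derived from the tensor dimensions up front instead of being
// read back from each block. Sketch only, mirroring the test loop above.
template <int NumDims, int Layout>
void visit_all_blocks(const Eigen::DSizes<Eigen::Index, NumDims>& dims) {
  namespace ei = Eigen::internal;
  auto strides = ei::strides<Layout>(dims);  // once per tensor, not per block

  ei::TensorBlockV2Mapper<NumDims, Layout> mapper(
      dims, {ei::TensorBlockV2ShapeType::kUniformAllDims,
             static_cast<size_t>(dims.TotalSize())});

  for (Eigen::Index i = 0; i < mapper.blockCount(); ++i) {
    auto desc = mapper.blockDescriptor(i);
    // desc.offset() is a linear coefficient index; combined with `strides`
    // it pins down the block's first coefficient, as UpdateCoeffSet does.
    Eigen::Index first = desc.offset();
    (void)first;
  }
}
```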
diff --git a/unsupported/test/cxx11_tensor_block_eval.cpp b/unsupported/test/cxx11_tensor_block_eval.cpp
index 086dd8c11..700e84a19 100644
--- a/unsupported/test/cxx11_tensor_block_eval.cpp
+++ b/unsupported/test/cxx11_tensor_block_eval.cpp
@@ -61,21 +61,21 @@ static TensorBlockParams<NumDims> RandomBlock(DSizes<Index, NumDims> dims,
template <int Layout, int NumDims>
static TensorBlockParams<NumDims> SkewedInnerBlock(
DSizes<Index, NumDims> dims) {
- using BlockMapper = internal::TensorBlockMapper<int, Index, NumDims, Layout>;
+ using BlockMapper = internal::TensorBlockV2Mapper<NumDims, Layout, Index>;
BlockMapper block_mapper(dims,
- internal::TensorBlockShapeType::kSkewedInnerDims,
- internal::random<Index>(1, dims.TotalSize()));
+ {internal::TensorBlockV2ShapeType::kSkewedInnerDims,
+ internal::random<size_t>(1, dims.TotalSize())});
- Index total_blocks = block_mapper.total_block_count();
+ Index total_blocks = block_mapper.blockCount();
Index block_index = internal::random<Index>(0, total_blocks - 1);
- auto block = block_mapper.GetBlockForIndex(block_index, nullptr);
- DSizes<Index, NumDims> sizes = block.block_sizes();
+ auto block = block_mapper.blockDescriptor(block_index);
+ DSizes<Index, NumDims> sizes = block.dimensions();
auto strides = internal::strides<Layout>(dims);
DSizes<Index, NumDims> offsets;
// Compute offsets for the first block coefficient.
- Index index = block.first_coeff_index();
+ Index index = block.offset();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / strides[i];
@@ -92,8 +92,7 @@ static TensorBlockParams<NumDims> SkewedInnerBlock(
if (NumDims > 0) offsets[NumDims - 1] = index;
}
- auto desc = TensorBlockDescriptor<NumDims>(block.first_coeff_index(), sizes);
- return {offsets, sizes, desc};
+ return {offsets, sizes, block};
}
template <int NumDims>
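
The offset-to-coordinates conversion in `SkewedInnerBlock` above is the standard div/mod walk over precomputed strides. Pulled out as a standalone helper for clarity (the function name is ours; this is the ColMajor branch only):

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

// Convert a linear coefficient index into per-dimension offsets of a
// column-major tensor; mirrors the ColMajor loop in SkewedInnerBlock.
template <int NumDims>
Eigen::DSizes<Eigen::Index, NumDims> linear_to_offsets(
    Eigen::Index index,
    const Eigen::DSizes<Eigen::Index, NumDims>& strides) {
  Eigen::DSizes<Eigen::Index, NumDims> offsets;
  for (int i = NumDims - 1; i > 0; --i) {
    const Eigen::Index idx = index / strides[i];  // coordinate in dim i
    index -= idx * strides[i];                    // remainder for inner dims
    offsets[i] = idx;
  }
  if (NumDims > 0) offsets[0] = index;  // innermost dimension
  return offsets;
}
```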
diff --git a/unsupported/test/cxx11_tensor_block_io.cpp b/unsupported/test/cxx11_tensor_block_io.cpp
index ddda3c7f9..6f318d9fe 100644
--- a/unsupported/test/cxx11_tensor_block_io.cpp
+++ b/unsupported/test/cxx11_tensor_block_io.cpp
@@ -22,14 +22,15 @@ static DSizes<Index, NumDims> RandomDims(Index min, Index max) {
return DSizes<Index, NumDims>(dims);
}
-static internal::TensorBlockShapeType RandomBlockShape() {
- return internal::random<bool>() ? internal::kUniformAllDims
- : internal::kSkewedInnerDims;
+static internal::TensorBlockV2ShapeType RandomBlockShape() {
+ return internal::random<bool>()
+ ? internal::TensorBlockV2ShapeType::kUniformAllDims
+ : internal::TensorBlockV2ShapeType::kSkewedInnerDims;
}
template <int NumDims>
-static Index RandomTargetBlockSize(const DSizes<Index, NumDims>& dims) {
- return internal::random<Index>(1, dims.TotalSize());
+static size_t RandomTargetBlockSize(const DSizes<Index, NumDims>& dims) {
+ return internal::random<size_t>(1, dims.TotalSize());
}
template <int Layout, int NumDims>
@@ -73,12 +74,12 @@ static void test_block_io_copy_data_from_source_to_target() {
// Construct a tensor block mapper.
using TensorBlockMapper =
- internal::TensorBlockMapper<T, Index, NumDims, Layout>;
- TensorBlockMapper block_mapper(dims, RandomBlockShape(),
- RandomTargetBlockSize(dims));
+ internal::TensorBlockV2Mapper<NumDims, Layout, Index>;
+ TensorBlockMapper block_mapper(dims, {RandomBlockShape(),
+ RandomTargetBlockSize(dims)});
// We will copy data from input to output through this buffer.
- Tensor<T, NumDims, Layout> block(block_mapper.block_dim_sizes());
+ Tensor<T, NumDims, Layout> block(block_mapper.blockDimensions());
// Precompute strides for TensorBlockIO::Copy.
auto input_strides = internal::strides<Layout>(dims);
@@ -88,24 +89,23 @@ static void test_block_io_copy_data_from_source_to_target() {
T* output_data = output.data();
T* block_data = block.data();
- for (int i = 0; i < block_mapper.total_block_count(); ++i) {
- using TensorBlock = internal::TensorBlock<T, Index, NumDims, Layout>;
- TensorBlock blk = block_mapper.GetBlockForIndex(i, block_data);
+ for (int i = 0; i < block_mapper.blockCount(); ++i) {
+ auto desc = block_mapper.blockDescriptor(i);
- auto blk_dims = blk.block_sizes();
+ auto blk_dims = desc.dimensions();
auto blk_strides = internal::strides<Layout>(blk_dims);
{
// Read from input into a block buffer.
IODst dst(blk_dims, blk_strides, block_data, 0);
- IOSrc src(input_strides, input_data, blk.first_coeff_index());
+ IOSrc src(input_strides, input_data, desc.offset());
TensorBlockIO::Copy(dst, src);
}
{
// Write from block buffer to output.
- IODst dst(blk_dims, output_strides, output_data, blk.first_coeff_index());
+ IODst dst(blk_dims, output_strides, output_data, desc.offset());
IOSrc src(blk_strides, block_data, 0);
TensorBlockIO::Copy(dst, src);
@@ -145,12 +145,12 @@ static void test_block_io_copy_using_reordered_dimensions() {
// Construct a tensor block mapper.
// NOTE: Tensor block mapper works with shuffled dimensions.
using TensorBlockMapper =
- internal::TensorBlockMapper<T, Index, NumDims, Layout>;
- TensorBlockMapper block_mapper(output_tensor_dims, RandomBlockShape(),
- RandomTargetBlockSize(output_tensor_dims));
+ internal::TensorBlockV2Mapper<NumDims, Layout, Index>;
+ TensorBlockMapper block_mapper(output_tensor_dims, {RandomBlockShape(),
+ RandomTargetBlockSize(output_tensor_dims)});
// We will copy data from input to output through this buffer.
- Tensor<T, NumDims, Layout> block(block_mapper.block_dim_sizes());
+ Tensor<T, NumDims, Layout> block(block_mapper.blockDimensions());
// Precompute strides for TensorBlockIO::Copy.
auto input_strides = internal::strides<Layout>(dims);
@@ -160,12 +160,11 @@ static void test_block_io_copy_using_reordered_dimensions() {
T* output_data = output.data();
T* block_data = block.data();
- for (Index i = 0; i < block_mapper.total_block_count(); ++i) {
- using TensorBlock = internal::TensorBlock<T, Index, NumDims, Layout>;
- TensorBlock blk = block_mapper.GetBlockForIndex(i, block_data);
+ for (Index i = 0; i < block_mapper.blockCount(); ++i) {
+ auto desc = block_mapper.blockDescriptor(i);
const Index first_coeff_index = GetInputIndex<Layout, NumDims>(
- blk.first_coeff_index(), output_to_input_dim_map, input_strides,
+ desc.offset(), output_to_input_dim_map, input_strides,
output_strides);
// NOTE: Block dimensions are in the same order as output dimensions.
@@ -174,7 +173,7 @@ static void test_block_io_copy_using_reordered_dimensions() {
using IODst = typename TensorBlockIO::Dst;
using IOSrc = typename TensorBlockIO::Src;
- auto blk_dims = blk.block_sizes();
+ auto blk_dims = desc.dimensions();
auto blk_strides = internal::strides<Layout>(blk_dims);
{
@@ -236,16 +235,13 @@ static void test_block_io_copy_using_reordered_dimensions_do_not_squeeze() {
float* tensor_data = tensor.data();
float* block_data = block.data();
- typedef internal::TensorBlock<float, Index, 3, Layout> TensorBlock;
- TensorBlock blk(0, block_dims, block_strides, tensor_strides, block_data);
-
using TensorBlockIO = internal::TensorBlockIOV2<float, Index, 3, Layout>;
using IODst = typename TensorBlockIO::Dst;
using IOSrc = typename TensorBlockIO::Src;
// Read from a tensor into a block.
- IODst dst(blk.block_sizes(), block_strides, block_data, 0);
- IOSrc src(tensor_strides, tensor_data, blk.first_coeff_index());
+ IODst dst(block_dims, block_strides, block_data, 0);
+ IOSrc src(tensor_strides, tensor_data, 0);
TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/block_to_tensor_dim);
@@ -287,16 +283,13 @@ static void test_block_io_copy_using_reordered_dimensions_squeeze() {
float* tensor_data = tensor.data();
float* block_data = block.data();
- typedef internal::TensorBlock<float, Index, 4, Layout> TensorBlock;
- TensorBlock blk(0, block_dims, block_strides, tensor_strides, block_data);
-
using TensorBlockIO = internal::TensorBlockIOV2<float, Index, 4, Layout>;
using IODst = typename TensorBlockIO::Dst;
using IOSrc = typename TensorBlockIO::Src;
// Read from a tensor into a block.
- IODst dst(blk.block_sizes(), block_strides, block_data, 0);
- IOSrc src(tensor_strides, tensor_data, blk.first_coeff_index());
+ IODst dst(block_dims, block_strides, block_data, 0);
+ IOSrc src(tensor_strides, tensor_data, 0);
TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/block_to_tensor_dim);
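
Finally, the `Dst`/`Src` copy pattern that replaces the old `TensorBlock`-based IO throughout this file, condensed into one hedged round-trip helper. The function name and parameterization are ours; the `TensorBlockIOV2` calls are exactly those in the hunks above:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

// Copy one block out of `input` into `scratch`, then into `output`, using
// the V2 IO interface exercised above. `offset` is the block's linear
// offset into both tensors; `blk_dims` its dimensions.
template <typename T, int NumDims, int Layout>
void roundtrip_block(const Eigen::DSizes<Eigen::Index, NumDims>& dims,
                     const Eigen::DSizes<Eigen::Index, NumDims>& blk_dims,
                     Eigen::Index offset,
                     const T* input, T* output, T* scratch) {
  namespace ei = Eigen::internal;
  using TensorBlockIO = ei::TensorBlockIOV2<T, Eigen::Index, NumDims, Layout>;
  using IODst = typename TensorBlockIO::Dst;
  using IOSrc = typename TensorBlockIO::Src;

  auto tensor_strides = ei::strides<Layout>(dims);
  auto block_strides = ei::strides<Layout>(blk_dims);

  {  // Read from the input tensor into the block buffer.
    IODst dst(blk_dims, block_strides, scratch, 0);
    IOSrc src(tensor_strides, input, offset);
    TensorBlockIO::Copy(dst, src);
  }
  {  // Write from the block buffer to the output tensor.
    IODst dst(blk_dims, tensor_strides, output, offset);
    IOSrc src(block_strides, scratch, 0);
    TensorBlockIO::Copy(dst, src);
  }
}
```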