author Eugene Zhulenev <ezhulenev@google.com> 2018-09-11 13:32:32 -0700
committer Eugene Zhulenev <ezhulenev@google.com> 2018-09-11 13:32:32 -0700
commit 81b38a155adf5d527bce5c84cf90cd83c28da445 (patch)
tree 8d0310b4b06a4667447ac7d0b4c66ebd16efb903
parent c144bb355b74f4600156284e8202fcf9c0c135d8 (diff)
Fix compilation of tiled evaluation code with c++03
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h | 6
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h | 11
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h | 5
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h | 5
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h | 5
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h | 5
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h | 4
-rw-r--r-- unsupported/test/cxx11_tensor_block_access.cpp | 14
-rw-r--r-- unsupported/test/cxx11_tensor_shuffling.cpp | 10
9 files changed, 30 insertions, 35 deletions
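The edits below all replace constructs that require C++11 with their C++03-compatible spellings: auto type deduction, closing nested template argument lists with ">>", qualifying unscoped enumerators with the enum type name, and brace-initialization in the tests. As a rough illustration of the auto replacements (this snippet is not part of the commit; the function and names are made up, and std::max stands in for numext::maxi), the deduced type is simply written out:

// Minimal sketch of the "auto" fixes: C++03 has no auto type deduction,
// so the block-size limit type is spelled explicitly.
#include <algorithm>

typedef long Index;  // stand-in for Eigen::Index

Index block_size_limit(Index cache_bytes, Index scalar_bytes) {
  // C++11 only:
  //   auto block_total_size_max = std::max<Index>(1, cache_bytes / scalar_bytes);
  // C++03-compatible spelling, as used throughout this commit:
  Index block_total_size_max = std::max<Index>(1, cache_bytes / scalar_bytes);
  return block_total_size_max;
}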
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 6d90af2d3..13da36257 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -396,8 +396,8 @@ struct TensorBlockCwiseUnaryOp {
typedef const Eigen::Array<InputScalar, Dynamic, 1> Input;
typedef Eigen::Array<OutputScalar, Dynamic, 1> Output;
- typedef Eigen::Map<Input, 0, InnerStride<>> InputMap;
- typedef Eigen::Map<Output, 0, InnerStride<>> OutputMap;
+ typedef Eigen::Map<Input, 0, InnerStride<> > InputMap;
+ typedef Eigen::Map<Output, 0, InnerStride<> > OutputMap;
const InputScalar* input_base = &input_data[input_index];
OutputScalar* output_base = &output_data[output_index];
@@ -502,7 +502,7 @@ struct TensorBlockCwiseUnaryIO {
input_stride, input_data);
// Update index.
for (int j = 0; j < num_squeezed_dims; ++j) {
- auto& state = block_iter_state[j];
+ BlockIteratorState& state = block_iter_state[j];
if (++state.count < state.size) {
output_index += state.output_stride;
input_index += state.input_stride;
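The InputMap/OutputMap change above is the classic C++03 nested-template issue: a closing ">>" is lexed as the right-shift operator rather than as two closing angle brackets, so a space is required between them. A minimal illustration, not taken from Eigen:

#include <vector>

// C++03 rejects the first declaration because ">>" is tokenized as operator>>;
// the second form compiles under both C++03 and C++11.
// std::vector<std::vector<int>> nested_cxx11_only;
std::vector<std::vector<int> > nested_cxx03_ok;

int main() { return 0; }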
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
index 02d061a9c..e5cf93ab0 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
@@ -596,12 +596,11 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
std::vector<internal::TensorOpResourceRequirements>* resources) const {
// TODO(wuke): Targeting L1 size is 30% faster than targeting L{-1} on large
// tensors. But this might need further tuning.
- auto block_total_size_max = numext::maxi<Eigen::Index>(
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
1, m_device.firstLevelCacheSize() / sizeof(Scalar));
resources->push_back(internal::TensorOpResourceRequirements(
- internal::TensorBlockShapeType::kSkewedInnerDims,
- block_total_size_max));
+ internal::kSkewedInnerDims, block_total_size_max));
m_impl.getResourceRequirements(resources);
}
@@ -617,8 +616,8 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
// equal to m_dimensions for inner dims, a smaller than m_dimensions[i] size
// for the first outer dim, and 1 for other outer dims. This is guaranteed
// by MergeResourceRequirements() in TensorBlock.h.
- const auto& output_block_sizes = output_block->block_sizes();
- const auto& output_block_strides = output_block->block_strides();
+ const Dimensions& output_block_sizes = output_block->block_sizes();
+ const Dimensions& output_block_strides = output_block->block_strides();
// Find where outer dims start.
int outer_dim_start = 0;
@@ -642,7 +641,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
return;
}
- const auto& input_dims = m_impl.dimensions();
+ const Dimensions& input_dims = m_impl.dimensions();
// Pre-fill input_block_sizes, broadcast_block_sizes,
// broadcast_block_strides, and broadcast_tensor_strides. Later on we will
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
index 76fab39e2..b47fa9e8e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
@@ -290,11 +290,10 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
std::vector<internal::TensorOpResourceRequirements>* resources) const {
- auto block_total_size_max = numext::maxi<Eigen::Index>(
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
1, m_device.lastLevelCacheSize() / sizeof(Scalar));
resources->push_back(internal::TensorOpResourceRequirements(
- internal::TensorBlockShapeType::kSkewedInnerDims,
- block_total_size_max));
+ internal::kSkewedInnerDims, block_total_size_max));
m_impl.getResourceRequirements(resources);
}
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
index 1826d7022..965bd8f1e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
@@ -550,11 +550,10 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
std::vector<internal::TensorOpResourceRequirements>* resources) const {
- auto block_total_size_max = numext::maxi<Eigen::Index>(
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
1, m_device.lastLevelCacheSize() / sizeof(Scalar));
resources->push_back(internal::TensorOpResourceRequirements(
- internal::TensorBlockShapeType::kSkewedInnerDims,
- block_total_size_max));
+ internal::kSkewedInnerDims, block_total_size_max));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
index 2f765acb7..16dc74afe 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -677,11 +677,10 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
std::vector<internal::TensorOpResourceRequirements>* resources) const {
- auto block_total_size_max = numext::maxi<Eigen::Index>(
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
1, m_device.lastLevelCacheSize() / sizeof(Scalar));
resources->push_back(internal::TensorOpResourceRequirements(
- internal::TensorBlockShapeType::kSkewedInnerDims,
- block_total_size_max));
+ internal::kSkewedInnerDims, block_total_size_max));
m_impl.getResourceRequirements(resources);
}
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 3d534eaa2..eeb2578fd 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -771,11 +771,10 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
std::vector<internal::TensorOpResourceRequirements>* resources) const {
- auto block_total_size_max = numext::maxi<Eigen::Index>(
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
1, m_device.lastLevelCacheSize() / sizeof(Scalar));
resources->push_back(internal::TensorOpResourceRequirements(
- internal::TensorBlockShapeType::kSkewedInnerDims,
- block_total_size_max));
+ internal::kSkewedInnerDims, block_total_size_max));
m_impl.getResourceRequirements(resources);
}
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
index a5b541a68..e018d0ab2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
@@ -229,10 +229,10 @@ struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
std::vector<internal::TensorOpResourceRequirements>* resources) const {
- auto block_total_size_max = numext::maxi<Eigen::Index>(
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
1, m_device.firstLevelCacheSize() / sizeof(Scalar));
resources->push_back(internal::TensorOpResourceRequirements(
- internal::TensorBlockShapeType::kUniformAllDims, block_total_size_max));
+ internal::kUniformAllDims, block_total_size_max));
m_impl.getResourceRequirements(resources);
}
diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp
index eec282ba7..ad12ae557 100644
--- a/unsupported/test/cxx11_tensor_block_access.cpp
+++ b/unsupported/test/cxx11_tensor_block_access.cpp
@@ -535,7 +535,7 @@ static void test_block_cwise_unary_io_basic() {
DSizes<Index, NumDims> block_sizes = RandomDims<NumDims>();
DSizes<Index, NumDims> strides(ComputeStrides<Layout, NumDims>(block_sizes));
- const auto total_size = block_sizes.TotalSize();
+ const Index total_size = block_sizes.TotalSize();
// Create a random input tensors.
T* input_data = GenerateRandomData<T>(total_size);
@@ -562,12 +562,12 @@ static void test_block_cwise_unary_io_squeeze_ones() {
DSizes<Index, 5> block_sizes(1, 2, 1, 3, 1);
DSizes<Index, 5> strides(ComputeStrides<Layout, 5>(block_sizes));
- const auto total_size = block_sizes.TotalSize();
+ const Index total_size = block_sizes.TotalSize();
// Create a random input tensors.
- auto* input_data = GenerateRandomData<float>(total_size);
+ float* input_data = GenerateRandomData<float>(total_size);
- auto* output_data = new float[total_size];
+ float* output_data = new float[total_size];
UnaryFunctor functor;
TensorBlockCwiseUnaryIO::Run(functor, block_sizes, strides, output_data,
strides, input_data);
@@ -599,13 +599,13 @@ static void test_block_cwise_unary_io_zero_strides() {
input_strides[4] = 0;
// Generate random data.
- auto* input_data = GenerateRandomData<float>(input_sizes.TotalSize());
+ float* input_data = GenerateRandomData<float>(input_sizes.TotalSize());
DSizes<Index, 5> output_sizes = rnd_dims;
DSizes<Index, 5> output_strides(ComputeStrides<Layout, 5>(output_sizes));
- const auto output_total_size = output_sizes.TotalSize();
- auto* output_data = new float[output_total_size];
+ const Index output_total_size = output_sizes.TotalSize();
+ float* output_data = new float[output_total_size];
UnaryFunctor functor;
TensorBlockCwiseUnaryIO::Run(functor, output_sizes, output_strides,
diff --git a/unsupported/test/cxx11_tensor_shuffling.cpp b/unsupported/test/cxx11_tensor_shuffling.cpp
index 467df39c7..062dd1c0f 100644
--- a/unsupported/test/cxx11_tensor_shuffling.cpp
+++ b/unsupported/test/cxx11_tensor_shuffling.cpp
@@ -81,12 +81,12 @@ static void test_expr_shuffling()
Tensor<float, 4, DataLayout> expected;
expected = tensor.shuffle(shuffles);
- Tensor<float, 4, DataLayout> result(5,7,3,2);
+ Tensor<float, 4, DataLayout> result(5, 7, 3, 2);
- array<ptrdiff_t, 4> src_slice_dim{{2,3,1,7}};
- array<ptrdiff_t, 4> src_slice_start{{0,0,0,0}};
- array<ptrdiff_t, 4> dst_slice_dim{{1,7,3,2}};
- array<ptrdiff_t, 4> dst_slice_start{{0,0,0,0}};
+ array<ptrdiff_t, 4> src_slice_dim({2, 3, 1, 7});
+ array<ptrdiff_t, 4> src_slice_start({0, 0, 0, 0});
+ array<ptrdiff_t, 4> dst_slice_dim({1, 7, 3, 2});
+ array<ptrdiff_t, 4> dst_slice_start({0, 0, 0, 0});
for (int i = 0; i < 5; ++i) {
result.slice(dst_slice_start, dst_slice_dim) =