author     Eugene Zhulenev <ezhulenev@google.com>  2019-11-12 10:12:28 -0800
committer  Eugene Zhulenev <ezhulenev@google.com>  2019-11-12 10:12:28 -0800
commit     13c3327f5cf829fd9d04a2ab46861e722cd74ca0 (patch)
tree       20bd1a5f361023db822298696efbcff7378ab4a7 /unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
parent     71aa53dd6dfdc497324d9e87f59c4ba820191856 (diff)
Remove legacy block evaluation support
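The evaluators touched in this file back the Tensor chip() expression, which fixes one dimension of a tensor at a given offset and yields an expression of rank N-1. Only the internal legacy block evaluation path (BlockAccess, TensorBlock, block()) is removed; the V2 path (BlockAccessV2, blockV2()) and the user-facing expression are unchanged. A minimal usage sketch for context (illustration only, not part of this commit):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 3> t(2, 3, 4);
  t.setRandom();

  // Fix dimension 1 at offset 2: the result is a rank-2 tensor of shape (2, 4).
  // Internally this is evaluated by TensorEvaluator<TensorChippingOp<...>, Device>,
  // the class modified by this patch.
  Eigen::Tensor<float, 2> slice = t.chip(2, 1);

  std::cout << "slice: " << slice.dimension(0) << " x "
            << slice.dimension(1) << "\n";
  return 0;
}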
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h  |  68
1 file changed, 0 insertions(+), 68 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
index 32d6960bf..098110217 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
@@ -148,7 +148,6 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
IsAligned = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
BlockAccessV2 = TensorEvaluator<ArgType, Device>::BlockAccessV2,
// Chipping of outer-most dimension is a trivial operation, because we can
// read and write directly from the underlying tensor using single offset.
@@ -167,11 +166,6 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
- typedef internal::TensorBlock<ScalarNoConst, Index, NumInputDims, Layout>
- InputTensorBlock;
- typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout>
- OutputTensorBlock;
-
//===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
@@ -218,20 +212,6 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
}
m_inputStride *= input_dims[m_dim.actualDim()];
m_inputOffset = m_stride * op.offset();
-
- if (BlockAccess) {
- if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
- m_inputStrides[0] = 1;
- for (int i = 1; i < NumInputDims; ++i) {
- m_inputStrides[i] = m_inputStrides[i - 1] * input_dims[i - 1];
- }
- } else {
- m_inputStrides[NumInputDims - 1] = 1;
- for (int i = NumInputDims - 2; i >= 0; --i) {
- m_inputStrides[i] = m_inputStrides[i + 1] * input_dims[i + 1];
- }
- }
- }
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@@ -323,52 +303,6 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
m_impl.getResourceRequirements(resources);
}
- // TODO(andydavis) Reduce the overhead of this function (experiment with
- // using a fixed block size).
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
- OutputTensorBlock* output_block) const {
- // Calculate input block sizes.
- const DSizes<Index, NumDims>& output_block_sizes =
- output_block->block_sizes();
- const DSizes<Index, NumDims>& output_block_strides =
- output_block->block_strides();
- const Index chip_dim = m_dim.actualDim();
- DSizes<Index, NumInputDims> input_block_sizes;
- DSizes<Index, NumInputDims> input_block_strides;
- for (Index i = 0; i < NumInputDims; ++i) {
- if (i < chip_dim) {
- input_block_sizes[i] = output_block_sizes[i];
- input_block_strides[i] = output_block_strides[i];
- } else if (i > chip_dim) {
- input_block_sizes[i] = output_block_sizes[i - 1];
- input_block_strides[i] = output_block_strides[i - 1];
- } else {
- input_block_sizes[i] = 1;
- }
- }
- // Fix up input_block_stride for chip dimension.
- if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
- if (chip_dim == 0) {
- input_block_strides[chip_dim] = 1;
- } else {
- input_block_strides[chip_dim] =
- input_block_strides[chip_dim - 1] * input_block_sizes[chip_dim - 1];
- }
- } else {
- if (chip_dim == NumInputDims - 1) {
- input_block_strides[chip_dim] = 1;
- } else {
- input_block_strides[chip_dim] =
- input_block_strides[chip_dim + 1] * input_block_sizes[chip_dim + 1];
- }
- }
- // Instantiate and read input block from input tensor.
- InputTensorBlock input_block(srcCoeff(output_block->first_coeff_index()),
- input_block_sizes, input_block_strides,
- m_inputStrides, output_block->data());
- m_impl.block(&input_block);
- }
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlockV2
blockV2(TensorBlockDesc& desc, TensorBlockScratch& scratch,
bool root_of_expr_ast = false) const {
@@ -482,7 +416,6 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
Index m_stride;
Index m_inputOffset;
Index m_inputStride;
- DSizes<Index, NumInputDims> m_inputStrides;
TensorEvaluator<ArgType, Device> m_impl;
const internal::DimensionId<DimId> m_dim;
const Device EIGEN_DEVICE_REF m_device;
@@ -508,7 +441,6 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
BlockAccessV2 = TensorEvaluator<ArgType, Device>::RawAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = false
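
For context (not part of the patch): the legacy block() removed above widened an output block of the chipped, rank N-1 expression back into a rank-N input block whose extent along the chipped dimension is 1. A simplified, standalone sketch of that index mapping, using plain std::array in place of Eigen's DSizes (hypothetical helper name, illustration only):

#include <array>
#include <cstddef>

// Map the sizes of an (NumInputDims-1)-dimensional output block back to the
// sizes of the NumInputDims-dimensional input block of the chipped tensor.
// e.g. chip_block_sizes<3>({{4, 6}}, /*chip_dim=*/1) yields {4, 1, 6}.
template <std::size_t NumInputDims>
std::array<long, NumInputDims> chip_block_sizes(
    const std::array<long, NumInputDims - 1>& output_block_sizes,
    std::size_t chip_dim) {
  std::array<long, NumInputDims> input_block_sizes{};
  for (std::size_t i = 0; i < NumInputDims; ++i) {
    if (i < chip_dim) {
      input_block_sizes[i] = output_block_sizes[i];      // dims before the chip map 1:1
    } else if (i > chip_dim) {
      input_block_sizes[i] = output_block_sizes[i - 1];  // dims after the chip shift by one
    } else {
      input_block_sizes[i] = 1;                          // the chipped dimension collapses to 1
    }
  }
  return input_block_sizes;
}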