author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-02-27 10:27:28 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-02-27 10:34:01 -0800
commit     e20be23387a6c1b72f3e34d03d4206c3211c921a (patch)
tree       11e3d90ab8ddad8da4d1e502fedf1aae9223b288 /tensorflow/contrib/model_pruning
parent     e929b16dc89f62a41bcaba57b98ddd221bf9bf68 (diff)
Make block-based pruning more general, allowing it to operate on higher-dimensional arrays that can be squeezed to 2-dimensional.
PiperOrigin-RevId: 187195105
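A minimal sketch of what this change enables, assuming the TF 1.x API of this contrib era (the shapes are illustrative, not from the patch): a weight tensor with size-1 dimensions now qualifies for block pruning, because it is squeezed to rank 2 before the eligibility check.

```python
import tensorflow as tf

# Illustrative only: a [1, 4, 4] weight tensor squeezes to rank 2, so after
# this change it takes the block-pruning path; a genuinely 3-D tensor such
# as [2, 4, 4] still falls back to elementwise pruning.
weights = tf.constant(0.1, shape=[1, 4, 4])
squeezed = tf.squeeze(weights)
print(squeezed.get_shape().ndims)  # 2 -> eligible for block pruning
```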
Diffstat (limited to 'tensorflow/contrib/model_pruning')
-rw-r--r--  tensorflow/contrib/model_pruning/README.md              |  2
-rw-r--r--  tensorflow/contrib/model_pruning/python/pruning.py      | 21
-rw-r--r--  tensorflow/contrib/model_pruning/python/pruning_test.py | 17
3 files changed, 31 insertions, 9 deletions
diff --git a/tensorflow/contrib/model_pruning/README.md b/tensorflow/contrib/model_pruning/README.md
index d286750c25..52b659c69f 100644
--- a/tensorflow/contrib/model_pruning/README.md
+++ b/tensorflow/contrib/model_pruning/README.md
@@ -134,7 +134,7 @@ $ bazel-bin/$examples_dir/cifar10/cifar10_eval --run_once
### Block Sparsity
-For some hardware architectures, it may be beneficial to induce spatially correlated sparsity. To train models in which the weight tensors have block sparse structure, set the *block_height* and *block_width* hyperparameters to the desired block configuration (2x2, 4x4, 4x1, 1x8, etc.). Currently, block sparsity is supported only for weight tensors of rank 2. The matrix is partitioned into non-overlapping blocks of size *[block_height, block_width]*, and either the average or the max absolute value in each block is taken as a proxy for the entire block (chosen by the *block_pooling_function* hyperparameter).
+For some hardware architectures, it may be beneficial to induce spatially correlated sparsity. To train models in which the weight tensors have block sparse structure, set the *block_height* and *block_width* hyperparameters to the desired block configuration (2x2, 4x4, 4x1, 1x8, etc.). Currently, block sparsity is supported only for weight tensors that can be squeezed to rank 2. The matrix is partitioned into non-overlapping blocks of size *[block_height, block_width]*, and either the average or the max absolute value in each block is taken as a proxy for the entire block (chosen by the *block_pooling_function* hyperparameter).
The convolution layer tensors are always pruned using block dimensions of [1,1].
## References
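For context, a hedged sketch of how the README's block-sparsity hyperparameters are typically configured, using `get_pruning_hparams` from this module (the hparam string and values are illustrative):

```python
from tensorflow.contrib.model_pruning.python import pruning

# Illustrative hyperparameter string; the hparam names (block_height,
# block_width, block_pooling_function) come from the README text above.
hparams = pruning.get_pruning_hparams().parse(
    "block_height=2,block_width=2,block_pooling_function=AVG")
```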
diff --git a/tensorflow/contrib/model_pruning/python/pruning.py b/tensorflow/contrib/model_pruning/python/pruning.py
index d16af9da19..86963be4b8 100644
--- a/tensorflow/contrib/model_pruning/python/pruning.py
+++ b/tensorflow/contrib/model_pruning/python/pruning.py
@@ -523,7 +523,8 @@ class Pruning(object):
"""Performs block-granular masking of the weights.
Block pruning occurs only if the block_height or block_width is > 1 and
- if the weight tensor has ndims = 2. Otherwise, elementwise pruning occurs.
+ if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise
+ pruning occurs.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
@@ -540,7 +541,8 @@ class Pruning(object):
Raises:
ValueError: if block pooling function is not AVG or MAX
"""
- if weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:
+ squeezed_weights = array_ops.squeeze(weights)
+ if squeezed_weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:
return self._update_mask(weights, threshold)
if self._block_pooling_function not in ['AVG', 'MAX']:
@@ -549,9 +551,11 @@ class Pruning(object):
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(
- array_ops.reshape(
- weights, [1, weights.get_shape()[0],
- weights.get_shape()[1], 1]))
+ array_ops.reshape(weights, [
+ 1,
+ squeezed_weights.get_shape()[0],
+ squeezed_weights.get_shape()[1], 1
+ ]))
pool_window = [self._block_dim[0], self._block_dim[1]]
pooled_weights = nn_ops.pool(
abs_weights,
@@ -572,9 +576,10 @@ class Pruning(object):
array_ops.ones(self._block_dim))
sliced_mask = array_ops.slice(
updated_mask, [0, 0],
- [weights.get_shape()[0],
- weights.get_shape()[1]])
- return smoothed_threshold, sliced_mask
+ [squeezed_weights.get_shape()[0],
+ squeezed_weights.get_shape()[1]])
+ return smoothed_threshold, array_ops.reshape(sliced_mask,
+ array_ops.shape(weights))
def _get_mask_assign_ops(self):
# Make sure the assignment ops have not already been added to the list
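To make the new reshape-back step concrete, a small sketch under stated assumptions (TF 1.x, an illustrative [1, 4, 4] weight and an all-ones stand-in mask; not the patch itself):

```python
import tensorflow as tf

# Illustrative only: pruning runs on the squeezed [4, 4] view, then the
# resulting 2-D mask is reshaped back to the original [1, 4, 4] weight
# shape, so downstream mask * weights ops are unaffected by the squeeze.
weights = tf.constant(0.1, shape=[1, 4, 4])
sliced_mask = tf.ones([4, 4])  # stand-in for the computed 2-D mask
mask = tf.reshape(sliced_mask, tf.shape(weights))
# mask now has the same shape as weights, as in the patch above.
```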
diff --git a/tensorflow/contrib/model_pruning/python/pruning_test.py b/tensorflow/contrib/model_pruning/python/pruning_test.py
index 1767b4bb94..89e6571319 100644
--- a/tensorflow/contrib/model_pruning/python/pruning_test.py
+++ b/tensorflow/contrib/model_pruning/python/pruning_test.py
@@ -142,6 +142,23 @@ class PruningTest(test.TestCase):
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
+ self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
+ expected_mask)
+
+ def testBlockMaskingWithHigherDimensions(self):
+ param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
+
+ # Weights as in testBlockMasking, but with one extra dimension.
+ weights_avg = constant_op.constant(
+ [[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
+ [0.3, 0.3, 0.4, 0.4]]])
+ weights_max = constant_op.constant(
+ [[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
+ [0.0, -0.3, 0.0, -0.4]]])
+ expected_mask = [[[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]]]
+
+ self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
+ expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
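As a sanity check on the expected masks, a short numpy sketch (illustrative, not part of the test) of the AVG pooling the test relies on: each 2x2 block of `weights_avg` averages to 0.1, 0.2, 0.3, or 0.4, so any threshold between 0.2 and 0.3 masks the top two blocks, matching `expected_mask`.

```python
import numpy as np

# Illustrative check of the AVG-pooled block values used by the test above.
w = np.array([[0.1, 0.1, 0.2, 0.2],
              [0.1, 0.1, 0.2, 0.2],
              [0.3, 0.3, 0.4, 0.4],
              [0.3, 0.3, 0.4, 0.4]])
block_means = np.abs(w).reshape(2, 2, 2, 2).mean(axis=(1, 3))
print(block_means)  # [[0.1 0.2] [0.3 0.4]] -> top blocks fall below threshold
```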