diff options
Diffstat (limited to 'tensorflow/contrib/model_pruning')
-rw-r--r--  tensorflow/contrib/model_pruning/README.md          | 11 ++++++-----
-rw-r--r--  tensorflow/contrib/model_pruning/python/learning.py | 11 +++++++----
-rw-r--r--  tensorflow/contrib/model_pruning/python/pruning.py  |  8 ++++----
3 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/tensorflow/contrib/model_pruning/README.md b/tensorflow/contrib/model_pruning/README.md index 86f4fd6adf..9143d082bf 100644 --- a/tensorflow/contrib/model_pruning/README.md +++ b/tensorflow/contrib/model_pruning/README.md @@ -66,10 +66,10 @@ is the sparsity_function_begin_step. In this equation, the sparsity_function_exponent is set to 3. ### Adding pruning ops to the training graph -The final step involves adding ops to the training graph that monitors the -distribution of the layer's weight magnitudes and determines the layer threshold -such masking all the weights below this threshold achieves the sparsity level -desired for the current training step. This can be achieved as follows: +The final step involves adding ops to the training graph that monitor the +distribution of the layer's weight magnitudes and determine the layer threshold, +such that masking all the weights below this threshold achieves the sparsity +level desired for the current training step. This can be achieved as follows: ```python tf.app.flags.DEFINE_string( @@ -79,7 +79,7 @@ tf.app.flags.DEFINE_string( with tf.graph.as_default(): # Create global step variable - global_step = tf.train.get_global_step() + global_step = tf.train.get_or_create_global_step() # Parse pruning hyperparameters pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams) @@ -103,6 +103,7 @@ with tf.graph.as_default(): mon_sess.run(mask_update_op) ``` +Ensure that `global_step` is being [incremented](https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#minimize), otherwise pruning will not work! 
## Example: Pruning and training deep CNNs on the cifar10 dataset diff --git a/tensorflow/contrib/model_pruning/python/learning.py b/tensorflow/contrib/model_pruning/python/learning.py index 2b79c23cef..26695237c2 100644 --- a/tensorflow/contrib/model_pruning/python/learning.py +++ b/tensorflow/contrib/model_pruning/python/learning.py @@ -33,11 +33,14 @@ to support training of pruned models # Create the train_op train_op = slim.learning.create_train_op(total_loss, optimizer) - # Set up sparsity - sparsity = pruning.setup_gradual_sparsity(self.global_step) + # Parse pruning hyperparameters + pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams) - # Create mask update op - mask_update_op = pruning.add_mask_update_ip(sparsity) + # Create a pruning object using the pruning_hparams + p = pruning.Pruning(pruning_hparams) + + # Add mask update ops to the graph + mask_update_op = p.conditional_mask_update_op() # Run training. learning.train(train_op, diff --git a/tensorflow/contrib/model_pruning/python/pruning.py b/tensorflow/contrib/model_pruning/python/pruning.py index 4b7af18b33..da9d398cbc 100644 --- a/tensorflow/contrib/model_pruning/python/pruning.py +++ b/tensorflow/contrib/model_pruning/python/pruning.py @@ -518,11 +518,11 @@ class Pruning(object): summary.scalar('last_mask_update_step', self._last_update_step) masks = get_masks() thresholds = get_thresholds() - for index, mask in enumerate(masks): + for mask, threshold in zip(masks, thresholds): if not self._exists_in_do_not_prune_list(mask.name): - summary.scalar(mask.name + '/sparsity', nn_impl.zero_fraction(mask)) - summary.scalar(thresholds[index].op.name + '/threshold', - thresholds[index]) + summary.scalar(mask.op.name + '/sparsity', + nn_impl.zero_fraction(mask)) + summary.scalar(threshold.op.name + '/threshold', threshold) def print_hparams(self): logging.info(self._spec.to_json()) |