path: root/tensorflow/contrib/fused_conv
author    Yao Zhang <yaozhang@google.com>    2018-06-20 18:36:13 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-06-20 18:39:22 -0700
commit 96dfcc2fdc9f3a7419d3d5c5a64489e757de624e (patch)
tree   8c684731bde1643158037bf1d4ed17e58c95096a /tensorflow/contrib/fused_conv
parent e8b18a6f0c02d364ff47ba5fa3dc61458d273674 (diff)
Support filter format for FusedConv2DBiasActivation.
PiperOrigin-RevId: 201454730
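
For context, a minimal usage sketch of the Python wrapper with an explicit filter format. This is not taken from the change itself; the positional argument order and the filter_format="OIHW" value are assumptions about the contrib wrapper and should be checked against the installed TensorFlow version:

import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op

# Hypothetical example: NCHW activations with an OIHW-laid-out filter.
conv_input = constant_op.constant(np.random.rand(1, 4, 8, 8).astype(np.float32))  # N, C, H, W
filters = constant_op.constant(np.random.rand(16, 4, 3, 3).astype(np.float32))    # O, I, H, W
bias = constant_op.constant(np.random.rand(16).astype(np.float32))

output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
    conv_input,
    filters,
    bias,
    strides=[1, 1, 1, 1],
    padding="SAME",
    data_format="NCHW",
    filter_format="OIHW",  # assumed layout string; the default filter layout is HWIO
    activation_mode="Relu")
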
Diffstat (limited to 'tensorflow/contrib/fused_conv')
-rw-r--r-- tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py | 20
1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
index a955e21b72..4d62ac65ff 100644
--- a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
+++ b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
@@ -21,8 +21,6 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op
-from tensorflow.core.protobuf import config_pb2
-from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
@@ -35,13 +33,6 @@ from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
-def NoMemoryOptimizationConfig():
- config = config_pb2.ConfigProto()
- config.graph_options.rewrite_options.memory_optimization = (
- rewriter_config_pb2.RewriterConfig.OFF)
- return config
-
-
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
@@ -202,8 +193,7 @@ class FusedConv2DBiasActivationTest(test.TestCase):
# This is to guarantee that there is always negative values after
# bias add so that we can test whether relu works correctly.
x3 = bias
- # TODO(b/79323979): re-enable memory optimization after this bug is fixed.
- with self.test_session(use_gpu=True, config=NoMemoryOptimizationConfig()):
+ with self.test_session(use_gpu=True):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
fused_t2 = t2
@@ -251,9 +241,7 @@ class FusedConv2DBiasActivationTest(test.TestCase):
x3 = np.random.rand(*[filter_in_sizes[-1]]).astype(np.float32)
def _SetupVal(data_format, use_gpu):
- # TODO(b/79323979): re-enable memory optimization after this bug is fixed.
- with self.test_session(
- use_gpu=use_gpu, config=NoMemoryOptimizationConfig()):
+ with self.test_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
t3 = constant_op.constant(x3, shape=[filter_in_sizes[-1]])
@@ -877,9 +865,7 @@ class FusedConvInt8Tests(test.TestCase):
conv_input_scale, conv_input, kernel, padding_type, strides,
side_input_scale, side_input, biases)
- # TODO(b/79323979): re-enable memory optimization after this bug is fixed.
- with self.test_session(
- use_gpu=True, config=NoMemoryOptimizationConfig()) as sess:
+ with self.test_session(use_gpu=True) as sess:
actual_y, expected_y = sess.run([actual, expected])
tf_logging.info("actual_y = ", actual_y)
tf_logging.info("expected_y = ", expected_y)