author    Jingyue Wu <jingyue@google.com>    2018-06-12 15:48:17 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-06-12 15:50:57 -0700
commit    19011fa34be3590f6d7c3b574687d53b1dea6a1f
tree      7900091e4df218f4c79eb8b87142c7938013c07e /tensorflow/contrib/fused_conv
parent    160daedf1d923b74f881cefc00d99978bcfc542e
Temporarily disable Grappler memory optimization for fused_conv tests.
PiperOrigin-RevId: 200294932
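
For context, the change follows a common pattern for opting individual tests out of a Grappler pass: build a ConfigProto whose rewriter options switch the memory optimizer off and pass it to the session. A minimal sketch of that pattern is below; the standalone Session usage is illustrative only and not part of this change.

    from tensorflow.core.protobuf import config_pb2
    from tensorflow.core.protobuf import rewriter_config_pb2
    from tensorflow.python.client import session

    def no_memory_optimization_config():
      # Turn off only Grappler's memory optimizer; other rewrites keep their defaults.
      config = config_pb2.ConfigProto()
      config.graph_options.rewrite_options.memory_optimization = (
          rewriter_config_pb2.RewriterConfig.OFF)
      return config

    # Illustrative usage outside the test harness:
    # with session.Session(config=no_memory_optimization_config()) as sess:
    #   sess.run(fetches)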
Diffstat (limited to 'tensorflow/contrib/fused_conv')
-rw-r--r--  tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py  20
1 file changed, 17 insertions, 3 deletions
diff --git a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
index 3d0ed89932..65cb94b5a4 100644
--- a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
+++ b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
@@ -21,6 +21,8 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
@@ -33,6 +35,13 @@ from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
+def NoMemoryOptimizationConfig():
+ config = config_pb2.ConfigProto()
+ config.graph_options.rewrite_options.memory_optimization = (
+ rewriter_config_pb2.RewriterConfig.OFF)
+ return config
+
+
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
@@ -193,7 +202,8 @@ class FusedConv2DBiasActivationTest(test.TestCase):
# This is to guarantee that there are always negative values after
# bias add so that we can test whether relu works correctly.
x3 = bias
- with self.test_session(use_gpu=True):
+ # TODO(b/79323979): re-enable memory optimization after this bug is fixed.
+ with self.test_session(use_gpu=True, config=NoMemoryOptimizationConfig()):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
fused_t2 = t2
@@ -241,7 +251,9 @@ class FusedConv2DBiasActivationTest(test.TestCase):
x3 = np.random.rand(*[filter_in_sizes[-1]]).astype(np.float32)
def _SetupVal(data_format, use_gpu):
- with self.test_session(use_gpu=use_gpu):
+ # TODO(b/79323979): re-enable memory optimization after this bug is fixed.
+ with self.test_session(
+ use_gpu=use_gpu, config=NoMemoryOptimizationConfig()):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
t3 = constant_op.constant(x3, shape=[filter_in_sizes[-1]])
@@ -864,7 +876,9 @@ class FusedConvInt8Tests(test.TestCase):
conv_input_scale, conv_input, kernel, padding_type, strides,
side_input_scale, side_input, biases)
- with self.test_session(use_gpu=True) as sess:
+ # TODO(b/79323979): re-enable memory optimization after this bug is fixed.
+ with self.test_session(
+ use_gpu=True, config=NoMemoryOptimizationConfig()) as sess:
actual_y, expected_y = sess.run([actual, expected])
print("actual_y = ", actual_y)
print("expected_y = ", expected_y)