diff options
author:    2018-06-22 01:46:03 -0700
committer: 2018-06-22 01:49:29 -0700
commit:    945d1a77aebb2071b571598cb1d02fac5b1370c1 (patch)
tree:      efce5ed23c87ad2460916ad1b08211ee6359a98c /tensorflow/python/layers
parent:    9682324b40ed36963cced138e21de29518d6843c (diff)
Replace unnecessary `()` in `run_in_graph_and_eager_modes()`.
PiperOrigin-RevId: 201652888
Diffstat (limited to 'tensorflow/python/layers')
-rw-r--r-- | tensorflow/python/layers/base_test.py | 30
-rw-r--r-- | tensorflow/python/layers/core_test.py | 22
2 files changed, 26 insertions, 26 deletions
diff --git a/tensorflow/python/layers/base_test.py b/tensorflow/python/layers/base_test.py index fcacc8d603..298e96e711 100644 --- a/tensorflow/python/layers/base_test.py +++ b/tensorflow/python/layers/base_test.py @@ -39,7 +39,7 @@ from tensorflow.python.platform import test class BaseLayerTest(test.TestCase): - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testLayerProperties(self): layer = base_layers.Layer(name='my_layer') self.assertEqual(layer.variables, []) @@ -53,13 +53,13 @@ class BaseLayerTest(test.TestCase): layer = base_layers.Layer(name='my_layer', trainable=False) self.assertEqual(layer.trainable, False) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testInt64Layer(self): layer = base_layers.Layer(name='my_layer', dtype='int64') layer.add_variable('my_var', [2, 2]) self.assertEqual(layer.name, 'my_layer') - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testAddWeight(self): layer = base_layers.Layer(name='my_layer') @@ -116,7 +116,7 @@ class BaseLayerTest(test.TestCase): with self.assertRaisesRegexp(ValueError, 'activity_regularizer'): core_layers.Dense(1, activity_regularizer=lambda *args, **kwargs: 0.) 
- @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testCall(self): class MyLayer(base_layers.Layer): @@ -132,7 +132,7 @@ class BaseLayerTest(test.TestCase): # op is only supported in GRAPH mode self.assertEqual(outputs.op.name, 'my_layer/Square') - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testDeepCopy(self): class MyLayer(base_layers.Layer): @@ -155,7 +155,7 @@ class BaseLayerTest(test.TestCase): self.assertEqual(layer_copy._graph, layer._graph) self.assertEqual(layer_copy._private_tensor, layer._private_tensor) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testScopeNaming(self): class PrivateLayer(base_layers.Layer): @@ -203,7 +203,7 @@ class BaseLayerTest(test.TestCase): my_layer_scoped1.apply(inputs) self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1') - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testInputSpecNdimCheck(self): class CustomerLayer(base_layers.Layer): @@ -230,7 +230,7 @@ class BaseLayerTest(test.TestCase): layer = CustomerLayer() layer.apply(constant_op.constant([[1], [2]])) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testInputSpecMinNdimCheck(self): class CustomerLayer(base_layers.Layer): @@ -258,7 +258,7 @@ class BaseLayerTest(test.TestCase): layer = CustomerLayer() layer.apply(constant_op.constant([[[1], [2]]])) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testInputSpecMaxNdimCheck(self): class CustomerLayer(base_layers.Layer): @@ -286,7 +286,7 @@ class BaseLayerTest(test.TestCase): layer = CustomerLayer() layer.apply(constant_op.constant([[1], [2]])) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testInputSpecDtypeCheck(self): class CustomerLayer(base_layers.Layer): @@ -306,7 +306,7 @@ class BaseLayerTest(test.TestCase): 
layer = CustomerLayer() layer.apply(constant_op.constant(1.0, dtype=dtypes.float32)) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testInputSpecAxesCheck(self): class CustomerLayer(base_layers.Layer): @@ -328,7 +328,7 @@ class BaseLayerTest(test.TestCase): layer = CustomerLayer() layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]])) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testInputSpecShapeCheck(self): class CustomerLayer(base_layers.Layer): @@ -348,7 +348,7 @@ class BaseLayerTest(test.TestCase): layer = CustomerLayer() layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]])) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testNoInputSpec(self): class CustomerLayer(base_layers.Layer): @@ -369,7 +369,7 @@ class BaseLayerTest(test.TestCase): layer.apply(array_ops.placeholder('int32')) layer.apply(array_ops.placeholder('int32', shape=(2, 3))) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def test_count_params(self): dense = core_layers.Dense(16) dense.build((None, 4)) @@ -379,7 +379,7 @@ class BaseLayerTest(test.TestCase): with self.assertRaises(ValueError): dense.count_params() - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testDictInputOutput(self): class DictLayer(base_layers.Layer): diff --git a/tensorflow/python/layers/core_test.py b/tensorflow/python/layers/core_test.py index cf45b07637..040c1cddc0 100644 --- a/tensorflow/python/layers/core_test.py +++ b/tensorflow/python/layers/core_test.py @@ -41,7 +41,7 @@ from tensorflow.python.platform import test class DenseTest(test.TestCase): - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testDenseProperties(self): dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense') self.assertEqual(dense.units, 2) @@ -91,14 +91,14 @@ class 
DenseTest(test.TestCase): core_layers.Dense(5)(inputs) core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testCallTensorDot(self): dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense') inputs = random_ops.random_uniform((5, 4, 3), seed=1) outputs = dense(inputs) self.assertListEqual([5, 4, 2], outputs.get_shape().as_list()) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testNoBias(self): dense = core_layers.Dense(2, use_bias=False, name='my_dense') inputs = random_ops.random_uniform((5, 2), seed=1) @@ -112,7 +112,7 @@ class DenseTest(test.TestCase): self.assertEqual(dense.kernel.name, 'my_dense/kernel:0') self.assertEqual(dense.bias, None) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testNonTrainable(self): dense = core_layers.Dense(2, trainable=False, name='my_dense') inputs = random_ops.random_uniform((5, 2), seed=1) @@ -125,7 +125,7 @@ class DenseTest(test.TestCase): self.assertEqual( len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testOutputShape(self): dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense') inputs = random_ops.random_uniform((5, 3), seed=1) @@ -165,7 +165,7 @@ class DenseTest(test.TestCase): dense = core_layers.Dense(4, name='my_dense') dense(inputs) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testActivation(self): dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1') inputs = random_ops.random_uniform((5, 3), seed=1) @@ -325,7 +325,7 @@ class DenseTest(test.TestCase): var_key = 'test2/dense/kernel' self.assertEqual(var_dict[var_key].name, '%s:0' % var_key) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def 
testComputeOutputShape(self): dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1') ts = tensor_shape.TensorShape @@ -347,7 +347,7 @@ class DenseTest(test.TestCase): dense.compute_output_shape(ts([None, 4, 3])).as_list()) # pylint: enable=protected-access - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testConstraints(self): k_constraint = lambda x: x / math_ops.reduce_sum(x) b_constraint = lambda x: x / math_ops.reduce_max(x) @@ -369,7 +369,7 @@ def _get_variable_dict_from_varstore(): class DropoutTest(test.TestCase): - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testDropoutProperties(self): dp = core_layers.Dropout(0.5, name='dropout') self.assertEqual(dp.rate, 0.5) @@ -377,7 +377,7 @@ class DropoutTest(test.TestCase): dp.apply(array_ops.ones(())) self.assertEqual(dp.name, 'dropout') - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testBooleanLearningPhase(self): dp = core_layers.Dropout(0.5) inputs = array_ops.ones((5, 3)) @@ -402,7 +402,7 @@ class DropoutTest(test.TestCase): np_output = sess.run(dropped, feed_dict={training: False}) self.assertAllClose(np.ones((5, 5)), np_output) - @test_util.run_in_graph_and_eager_modes() + @test_util.run_in_graph_and_eager_modes def testDynamicNoiseShape(self): inputs = array_ops.ones((5, 3, 2)) noise_shape = [None, 1, None] |