diff options
author | James Qin <jamesqin@google.com> | 2018-06-19 04:15:27 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-06-19 04:18:10 -0700 |
commit | 707ac111cfed90f35c37417d8c79ab7cbcba152a (patch) | |
tree | e5ff07df7d5516cfbcee7640266ff88167440f8f /tensorflow/python/layers | |
parent | fc6ff59c0c12bedbd1ca32000a24ae9e64c0b661 (diff) |
Update some documentation for the layer-input-casting feature.
PiperOrigin-RevId: 201152785
Diffstat (limited to 'tensorflow/python/layers')
-rw-r--r-- | tensorflow/python/layers/base_test.py | 16 |
1 file changed, 9 insertions, 7 deletions
diff --git a/tensorflow/python/layers/base_test.py b/tensorflow/python/layers/base_test.py index 15448c6be8..ad44328aab 100644 --- a/tensorflow/python/layers/base_test.py +++ b/tensorflow/python/layers/base_test.py @@ -593,7 +593,8 @@ class BaseLayerTest(test.TestCase): @test_util.run_in_graph_and_eager_modes() def testOnlyCastInputsWhenDtypeSpecified(self): - class MyLayerBase(keras_base_layer.Layer): + + class MyKerasLayer(keras_base_layer.Layer): def call(self, inputs): self.x = inputs[0] @@ -603,13 +604,13 @@ class BaseLayerTest(test.TestCase): # Inherit from both the Keras Layer and base_layers.Layer to ensure we # still get the base_layers.Layer behavior when directly inheriting from # the Keras Layer. - class MyLayer(MyLayerBase, base_layers.Layer): + class MyTFLayer(MyKerasLayer, base_layers.Layer): pass # Test inputs are casted. input1 = array_ops.constant(1.0, dtype=dtypes.float64) input2 = array_ops.constant(1.0, dtype=dtypes.float32) - layer = MyLayer(dtype=dtypes.float16) + layer = MyTFLayer(dtype=dtypes.float16) output1, output2 = layer([input1, input2]) self.assertEqual(output1.dtype, dtypes.float16) self.assertEqual(output2.dtype, dtypes.float16) @@ -617,14 +618,15 @@ class BaseLayerTest(test.TestCase): # Test inputs are not casted. 
input1 = array_ops.constant(1.0, dtype=dtypes.float64) input2 = array_ops.constant(1.0, dtype=dtypes.float32) - layer = MyLayer() + layer = MyTFLayer() output1, output2 = layer([input1, input2]) self.assertEqual(output1.dtype, dtypes.float64) self.assertEqual(output2.dtype, dtypes.float32) @test_util.run_in_graph_and_eager_modes() def testVariablesDefaultToFloat32(self): - class MyLayerBase(keras_base_layer.Layer): + + class MyKerasLayer(keras_base_layer.Layer): def build(self, input_shape): self.x = self.add_weight('x', ()) @@ -635,14 +637,14 @@ class BaseLayerTest(test.TestCase): # Inherit from both the Keras Layer and base_layers.Layer to ensure we # still get the base_layers.Layer behavior when directly inheriting from # the Keras Layer. - class MyLayer(MyLayerBase, base_layers.Layer): + class MyTFLayer(MyKerasLayer, base_layers.Layer): pass try: # The behavior of Keras Layers is to default to floatx. Ensure that this # behavior is overridden to instead default to float32. backend.set_floatx('float16') - layer = MyLayer() + layer = MyTFLayer() layer.build(()) self.assertEqual(layer.dtype, None) self.assertEqual(layer.x.dtype.base_dtype, dtypes.float32) |