path: root/tensorflow/python/layers
author    James Qin <jamesqin@google.com>    2018-06-14 02:02:26 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-06-14 02:04:54 -0700
commit    0b8c5806f4f1d3a47b30bf203b3e456f036b0adc (patch)
tree      3bb6da10c88306134dda7f530e470200e56b7276 /tensorflow/python/layers
parent    c570211c5cd972a278366d3d3fd65ee8f99836aa (diff)
Remove the hardcoded dtype in tf.layers.xxx() function calls to make them compatible with mixed precision training APIs.
tf.layers.foolayer(inputs) creates a tf.layers.FooLayer(dtype=inputs.dtype) and immediately invokes __call__() on the inputs. The dtype in the FooLayer() constructor isn't needed, and it stands in the way of the global mixed precision dtype we plan to add in the future.
PiperOrigin-RevId: 200524027
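For context, the sketch below (hypothetical names ToyDense/toy_dense, not the actual TensorFlow source) illustrates the wrapper pattern the commit message describes: the functional tf.layers.xxx() call constructs a layer object and immediately applies it to the inputs, and a layer left without an explicit dtype can fall back to the inputs' dtype on its own, so pinning dtype=inputs.dtype.base_dtype in the constructor is redundant and would conflict with a globally configured mixed precision dtype.

```python
# A minimal, self-contained sketch (not TensorFlow source) of the pattern this
# commit changes. ToyDense and toy_dense are hypothetical illustration names.
import tensorflow as tf


class ToyDense(object):
  """Toy stand-in for a layer: if no dtype is given, it adopts the inputs' dtype."""

  def __init__(self, units, dtype=None):
    self.units = units
    self.dtype = dtype
    self.kernel = None

  def __call__(self, inputs):
    if self.dtype is None:
      # Without a constructor dtype the layer falls back to the inputs' dtype,
      # which leaves room for a global mixed-precision policy to decide later.
      self.dtype = inputs.dtype.base_dtype
    if self.kernel is None:
      self.kernel = tf.Variable(
          tf.zeros([int(inputs.shape[-1]), self.units], dtype=self.dtype))
    return tf.matmul(inputs, self.kernel)


def toy_dense(inputs, units):
  # Before this commit the functional wrapper pinned the dtype itself:
  #   layer = ToyDense(units, dtype=inputs.dtype.base_dtype)
  # After the commit the argument is simply omitted:
  layer = ToyDense(units)
  return layer(inputs)


x = tf.ones([2, 3], dtype=tf.float16)
y = toy_dense(x, units=4)  # kernel and output are float16, inferred from x
```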
Diffstat (limited to 'tensorflow/python/layers')
-rw-r--r--  tensorflow/python/layers/convolutional.py   5
-rw-r--r--  tensorflow/python/layers/core.py             1
-rw-r--r--  tensorflow/python/layers/normalization.py    1
3 files changed, 0 insertions, 7 deletions
diff --git a/tensorflow/python/layers/convolutional.py b/tensorflow/python/layers/convolutional.py
index 267d78dbcb..36cef3855e 100644
--- a/tensorflow/python/layers/convolutional.py
+++ b/tensorflow/python/layers/convolutional.py
@@ -217,7 +217,6 @@ def conv1d(inputs,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
- dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
@@ -421,7 +420,6 @@ def conv2d(inputs,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
- dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
@@ -627,7 +625,6 @@ def conv3d(inputs,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
- dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
@@ -1266,7 +1263,6 @@ def conv2d_transpose(inputs,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
- dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
@@ -1438,7 +1434,6 @@ def conv3d_transpose(inputs,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
- dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
diff --git a/tensorflow/python/layers/core.py b/tensorflow/python/layers/core.py
index abbacac442..aadff231da 100644
--- a/tensorflow/python/layers/core.py
+++ b/tensorflow/python/layers/core.py
@@ -184,7 +184,6 @@ def dense(
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
- dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
diff --git a/tensorflow/python/layers/normalization.py b/tensorflow/python/layers/normalization.py
index d082e312e9..ece6667981 100644
--- a/tensorflow/python/layers/normalization.py
+++ b/tensorflow/python/layers/normalization.py
@@ -308,7 +308,6 @@ def batch_normalization(inputs,
virtual_batch_size=virtual_batch_size,
adjustment=adjustment,
name=name,
- dtype=inputs.dtype.base_dtype,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training)
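As a rough usage check (a sketch assuming the TF1-style layers API via tf.compat.v1, and assuming the base Layer still infers its dtype from the first inputs it sees when none is set), the functional wrappers should keep working on non-float32 inputs after the dtype argument is dropped:

```python
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.placeholder(tf.float16, shape=[None, 8])
y = tf.layers.dense(x, units=4)  # no explicit dtype passed anywhere
# The dense kernel is created with the dtype inferred from `x` (float16),
# so the wrapper stays compatible with mixed precision inputs.
print(tf.trainable_variables()[0].dtype)
```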