-rw-r--r--  tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py | 6
-rw-r--r--  tensorflow/contrib/ffmpeg/ffmpeg_ops.py | 7
-rw-r--r--  tensorflow/contrib/framework/python/ops/variables.py | 4
-rw-r--r--  tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py | 4
-rw-r--r--  tensorflow/contrib/metrics/python/ops/set_ops.py | 9
-rw-r--r--  tensorflow/contrib/rnn/python/ops/gru_ops.py | 7
-rw-r--r--  tensorflow/contrib/rnn/python/ops/lstm_ops.py | 7
-rw-r--r--  tensorflow/contrib/tensor_forest/data/data_ops.py | 5
-rw-r--r--  tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py | 15
-rw-r--r--  tensorflow/contrib/tensor_forest/python/ops/inference_ops.py | 4
-rw-r--r--  tensorflow/contrib/tensor_forest/python/ops/topn_ops.py | 5
-rw-r--r--  tensorflow/contrib/tensor_forest/python/ops/training_ops.py | 10
-rw-r--r--  tensorflow/python/framework/constant_op.py | 4
-rw-r--r--  tensorflow/python/ops/array_ops.py | 28
-rw-r--r--  tensorflow/python/ops/nn_ops.py | 6
-rw-r--r--  tensorflow/python/ops/random_ops.py | 3
-rw-r--r--  tensorflow/python/training/training_ops.py | 47
17 files changed, 2 insertions(+), 169 deletions(-)
diff --git a/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py b/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py
index 6ef7027bc6..4a3120dcb8 100644
--- a/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py
+++ b/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py
@@ -19,7 +19,6 @@ from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.ops import gen_cudnn_rnn_ops
from tensorflow.contrib.util import loader
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -315,8 +314,3 @@ def _cudnn_rnn_backward(op, *grad):
rnn_mode=op.get_attr("rnn_mode"),
input_mode=op.get_attr("input_mode"),
direction=op.get_attr("direction"))
-
-
-ops.RegisterShape("CudnnRNNParamsSize")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("CudnnRNN")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("CudnnRNNBackprop")(common_shapes.call_cpp_shape_fn)
diff --git a/tensorflow/contrib/ffmpeg/ffmpeg_ops.py b/tensorflow/contrib/ffmpeg/ffmpeg_ops.py
index ed218074ae..d9159c505f 100644
--- a/tensorflow/contrib/ffmpeg/ffmpeg_ops.py
+++ b/tensorflow/contrib/ffmpeg/ffmpeg_ops.py
@@ -22,7 +22,6 @@ import os
from tensorflow.contrib.ffmpeg.ops import gen_decode_audio_op_py
from tensorflow.contrib.ffmpeg.ops import gen_encode_audio_op_py
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
@@ -30,9 +29,6 @@ from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
-ops.RegisterShape('DecodeAudio')(common_shapes.call_cpp_shape_fn)
-
-
def decode_audio(contents, file_format=None, samples_per_second=None,
channel_count=None):
"""Create an op that decodes the contents of an audio file.
@@ -67,9 +63,6 @@ def decode_audio(contents, file_format=None, samples_per_second=None,
ops.NotDifferentiable('DecodeAudio')
-ops.RegisterShape('EncodeAudio')(common_shapes.call_cpp_shape_fn)
-
-
def encode_audio(audio, file_format=None, samples_per_second=None):
"""Creates an op that encodes an audio file using sampled audio from a tensor.
diff --git a/tensorflow/contrib/framework/python/ops/variables.py b/tensorflow/contrib/framework/python/ops/variables.py
index aedadb970b..4878d27167 100644
--- a/tensorflow/contrib/framework/python/ops/variables.py
+++ b/tensorflow/contrib/framework/python/ops/variables.py
@@ -23,7 +23,6 @@ from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@@ -80,9 +79,6 @@ def zero_initializer(ref, use_locking=True, name="zero_initializer"):
return gen_variable_ops.zero_initializer(ref, name=name)
-ops.RegisterShape('ZeroInitializer')(common_shapes.call_cpp_shape_fn)
-
-
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
diff --git a/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py b/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
index 35edf280ef..e4141c6b6d 100644
--- a/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
+++ b/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
@@ -123,7 +123,7 @@ def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
-ops.RegisterShape("SparseFeatureCross")(common_shapes.call_cpp_shape_fn)
ops.NotDifferentiable("SparseFeatureCross")
-ops.RegisterShape("SparseFeatureCrossV2")(common_shapes.call_cpp_shape_fn)
+
+
ops.NotDifferentiable("SparseFeatureCrossV2")
diff --git a/tensorflow/contrib/metrics/python/ops/set_ops.py b/tensorflow/contrib/metrics/python/ops/set_ops.py
index c4c894cc0f..dd737a14c2 100644
--- a/tensorflow/contrib/metrics/python/ops/set_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/set_ops.py
@@ -20,7 +20,6 @@ from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.util import loader
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
@@ -35,9 +34,6 @@ _VALID_DTYPES = set([
dtypes.uint8, dtypes.uint16, dtypes.string])
-ops.RegisterShape("SetSize")(common_shapes.call_cpp_shape_fn)
-
-
def set_size(a, validate_indices=True):
"""Compute number of unique elements along last dimension of `a`.
@@ -65,11 +61,6 @@ def set_size(a, validate_indices=True):
ops.NotDifferentiable("SetSize")
-ops.RegisterShape("DenseToDenseSetOperation")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("DenseToSparseSetOperation")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseToSparseSetOperation")(common_shapes.call_cpp_shape_fn)
-
-
ops.NotDifferentiable("DenseToDenseSetOperation")
ops.NotDifferentiable("DenseToSparseSetOperation")
ops.NotDifferentiable("SparseToSparseSetOperation")
diff --git a/tensorflow/contrib/rnn/python/ops/gru_ops.py b/tensorflow/contrib/rnn/python/ops/gru_ops.py
index 28641ad34f..5cd2f21a0d 100644
--- a/tensorflow/contrib/rnn/python/ops/gru_ops.py
+++ b/tensorflow/contrib/rnn/python/ops/gru_ops.py
@@ -18,7 +18,6 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
@@ -32,9 +31,6 @@ _gru_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_gru_ops.so"))
-ops.RegisterShape("GRUBlockCellGrad")(common_shapes.call_cpp_shape_fn)
-
-
@ops.RegisterGradient("GRUBlockCell")
def _GRUBlockCellGrad(op, *grad):
r"""Gradient for GRUBlockCell.
@@ -97,9 +93,6 @@ def _GRUBlockCellGrad(op, *grad):
return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c
-ops.RegisterShape("GRUBlockCell")(common_shapes.call_cpp_shape_fn)
-
-
class GRUBlockCell(rnn_cell.RNNCell):
r"""Block GRU cell implementation.
diff --git a/tensorflow/contrib/rnn/python/ops/lstm_ops.py b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
index 5ab571b1bb..df7ab78897 100644
--- a/tensorflow/contrib/rnn/python/ops/lstm_ops.py
+++ b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
@@ -292,10 +292,6 @@ def _LSTMBlockCellGrad(op, *grad):
wco_grad, b_grad)
-ops.RegisterShape("LSTMBlockCellGrad")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("BlockLSTM")(common_shapes.call_cpp_shape_fn)
-
-
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
@@ -331,9 +327,6 @@ def _BlockLSTMGrad(op, *grad):
wcf_grad, b_grad]
-ops.RegisterShape("BlockLSTMGrad")(common_shapes.call_cpp_shape_fn)
-
-
class LSTMBlockCell(rnn_cell.RNNCell):
"""Basic LSTM recurrent network cell.
diff --git a/tensorflow/contrib/tensor_forest/data/data_ops.py b/tensorflow/contrib/tensor_forest/data/data_ops.py
index 80798d3e48..f7b1c1e190 100644
--- a/tensorflow/contrib/tensor_forest/data/data_ops.py
+++ b/tensorflow/contrib/tensor_forest/data/data_ops.py
@@ -21,7 +21,6 @@ import threading
from tensorflow.contrib.tensor_forest.python import constants
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
@@ -41,10 +40,6 @@ ops.NotDifferentiable('SparseValuesToIndices')
ops.NotDifferentiable('StringToFloat')
-ops.RegisterShape('SparseValuesToIndices')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('StringToFloat')(common_shapes.call_cpp_shape_fn)
-
-
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), but
# there's not yet any guarantee that the shared object exists.
diff --git a/tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py b/tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py
index 5d942b75ae..940420416c 100644
--- a/tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py
+++ b/tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py
@@ -19,7 +19,6 @@ from __future__ import print_function
import threading
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -42,20 +41,6 @@ ops.NotDifferentiable('KFeatureWeightGradient')
ops.NotDifferentiable('UnpackPath')
-ops.RegisterShape('RoutingFunction')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('KFeatureRoutingFunction')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('HardRoutingFunction')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('StochasticHardRoutingFunction')(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('StochasticHardRoutingGradient')(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('UnpackPath')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('RoutingGradient')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('KFeatureDataGradient')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('KFeatureRoutingGradient')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('KFeatureWeightGradient')(common_shapes.call_cpp_shape_fn)
-
-
@ops.RegisterGradient('RoutingFunction')
def _RoutingFunctionGradient(op, grad):
"""The gradient of RoutingFunction.
diff --git a/tensorflow/contrib/tensor_forest/python/ops/inference_ops.py b/tensorflow/contrib/tensor_forest/python/ops/inference_ops.py
index 62cda176a7..349cdc7b07 100644
--- a/tensorflow/contrib/tensor_forest/python/ops/inference_ops.py
+++ b/tensorflow/contrib/tensor_forest/python/ops/inference_ops.py
@@ -19,7 +19,6 @@ from __future__ import print_function
import threading
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
@@ -37,9 +36,6 @@ _ops_lock = threading.Lock()
ops.NotDifferentiable('TreePredictions')
-ops.RegisterShape('TreePredictions')(common_shapes.call_cpp_shape_fn)
-
-
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), but
# there's not yet any guarantee that the shared object exists.
diff --git a/tensorflow/contrib/tensor_forest/python/ops/topn_ops.py b/tensorflow/contrib/tensor_forest/python/ops/topn_ops.py
index 7f578e2737..ade7abd20e 100644
--- a/tensorflow/contrib/tensor_forest/python/ops/topn_ops.py
+++ b/tensorflow/contrib/tensor_forest/python/ops/topn_ops.py
@@ -21,7 +21,6 @@ import threading
import tensorflow as tf
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
TOPN_OPS_FILE = '_topn_ops.so'
@@ -33,10 +32,6 @@ ops.NotDifferentiable('TopNInsert')
ops.NotDifferentiable('TopNRemove')
-ops.RegisterShape('TopNInsert')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('TopNRemove')(common_shapes.call_cpp_shape_fn)
-
-
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), but
# there's not yet any guarantee that the shared object exists.
diff --git a/tensorflow/contrib/tensor_forest/python/ops/training_ops.py b/tensorflow/contrib/tensor_forest/python/ops/training_ops.py
index b091ca463e..8115c80ee4 100644
--- a/tensorflow/contrib/tensor_forest/python/ops/training_ops.py
+++ b/tensorflow/contrib/tensor_forest/python/ops/training_ops.py
@@ -19,7 +19,6 @@ from __future__ import print_function
import threading
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
@@ -42,15 +41,6 @@ ops.NotDifferentiable('ScatterAddNdim')
ops.NotDifferentiable('UpdateFertileSlots')
-ops.RegisterShape('CountExtremelyRandomStats')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('SampleInputs')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('BestSplits')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('GrowTree')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('FinishedNodes')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('ScatterAddNdim')(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape('UpdateFertileSlots')(common_shapes.call_cpp_shape_fn)
-
-
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), but
# there's not yet any guarantee that the shared object exists.
diff --git a/tensorflow/python/framework/constant_op.py b/tensorflow/python/framework/constant_op.py
index a4967a31a9..f06b477bc8 100644
--- a/tensorflow/python/framework/constant_op.py
+++ b/tensorflow/python/framework/constant_op.py
@@ -108,7 +108,6 @@ from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
-from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
@@ -169,9 +168,6 @@ def constant(value, dtype=None, shape=None, name="Const"):
return const_tensor
-ops.RegisterShape("Const")(common_shapes.call_cpp_shape_fn)
-
-
def _constant_tensor_conversion_function(v, dtype=None, name=None,
as_ref=False):
_ = as_ref
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index 9150b58f42..4600560738 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -1878,9 +1878,6 @@ def _SplitShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0])
-ops.RegisterShape("SplitV")(common_shapes.call_cpp_shape_fn)
-
-
@ops.RegisterShape("Tile")
def _TileShape(op):
"""Shape function for the Tile op.
@@ -2020,28 +2017,12 @@ def _EditDistanceShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2, 5])
-ops.RegisterShape("Quantize")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("Dequantize")(common_shapes.call_cpp_shape_fn)
-
-
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(grad, op.inputs[0])
-ops.RegisterShape("FakeQuantWithMinMaxArgs")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("FakeQuantWithMinMaxArgsGradient")(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("FakeQuantWithMinMaxVars")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("FakeQuantWithMinMaxVarsGradient")(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("FakeQuantWithMinMaxVarsPerChannel")(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("FakeQuantWithMinMaxVarsPerChannelGradient")(
- common_shapes.call_cpp_shape_fn)
-
-
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
@@ -2357,9 +2338,6 @@ def _OneHotShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
-ops.RegisterShape("PlaceholderWithDefault")(common_shapes.call_cpp_shape_fn)
-
-
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Return a mask tensor representing the first N positions of each row.
@@ -2510,12 +2488,6 @@ def _DelegateQuantizedReshapeShape(op):
return common_shapes.call_cpp_shape_fn(
op, input_tensors_as_shapes_needed=[1])
-ops.RegisterShape("QuantizeV2")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("QuantizedBatchNormWithGlobalNormalization")(
- common_shapes.call_cpp_shape_fn)
-
-ops.RegisterShape("QuantizedConcat")(common_shapes.call_cpp_shape_fn)
-
@ops.RegisterShape("ScatterNd")
def _DelegateScatterNdShape(op):
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index 042138c707..8a1157ac59 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -1558,12 +1558,6 @@ def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
return cost
-ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SoftmaxCrossEntropyWithLogits")(
- common_shapes.call_cpp_shape_fn)
-
-
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the average pooling on the input.
diff --git a/tensorflow/python/ops/random_ops.py b/tensorflow/python/ops/random_ops.py
index 1755fb57a8..71476ff00f 100644
--- a/tensorflow/python/ops/random_ops.py
+++ b/tensorflow/python/ops/random_ops.py
@@ -458,6 +458,3 @@ ops.NotDifferentiable("RandomGamma")
@ops.RegisterShape("RandomUniformInt")
def _RandomShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0])
-
-
-ops.RegisterShape("RandomShuffle")(common_shapes.call_cpp_shape_fn)
diff --git a/tensorflow/python/training/training_ops.py b/tensorflow/python/training/training_ops.py
index 4d2aca8d74..e98c32b614 100644
--- a/tensorflow/python/training/training_ops.py
+++ b/tensorflow/python/training/training_ops.py
@@ -19,55 +19,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.python.framework import common_shapes
-from tensorflow.python.framework import ops
-from tensorflow.python.framework import tensor_shape
from tensorflow.python.training import gen_training_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.gen_training_ops import *
# pylint: enable=wildcard-import
-
-
-# Shape functions for fused training ops
-# --------------------------------------
-#
-# The fused training ops all have the same basic structure: they take
-# one or more variables with the same shape, and emit a reference to
-# the original variable (which has the same shape as the first
-# input). In addition, they take one or more scalar tensors containing
-# hyperparameters.
-#
-# The sparse ops take the gradients as a Python IndexedSlices, which
-# means that the indices are a vector of length N, and the gradient
-# values are a tensor whose size is the same as the original variable,
-# except for the 0th dimension, which has size N.
-
-
-def _AssertInputIsScalar(op, index):
- """Raises ValueError if `op.inputs[index]` is not scalar."""
- op.inputs[index].get_shape().assert_is_compatible_with(tensor_shape.scalar())
-
-
-ops.RegisterShape("ApplyAdadelta")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyAdagrad")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyProximalAdagrad")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyFtrl")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyAdagradDA")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyAdam")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyMomentum")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyRMSProp")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyCenteredRMSProp")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyGradientDescent")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("ApplyProximalGradientDescent")(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyProximalGradientDescent")(
- common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyRMSProp")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyCenteredRMSProp")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyAdadelta")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyAdagrad")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyProximalAdagrad")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyFtrl")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyAdagradDA")(common_shapes.call_cpp_shape_fn)
-ops.RegisterShape("SparseApplyMomentum")(common_shapes.call_cpp_shape_fn)
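
Every hunk in this change deletes the same two-line Python pattern. As a point of reference, a minimal sketch of that pattern follows, assuming the TensorFlow source tree this commit targets; the op name "MyOp" and the snippet itself are illustrative only and are not part of the commit:

    # Sketch of the Python-side shape registration removed throughout this diff,
    # assuming the contemporaneous TensorFlow tree; "MyOp" is a hypothetical op.
    from tensorflow.python.framework import common_shapes
    from tensorflow.python.framework import ops

    # Old boilerplate: delegate Python shape inference for the op to the shape
    # function declared in its C++ REGISTER_OP definition.
    ops.RegisterShape("MyOp")(common_shapes.call_cpp_shape_fn)

    # With that delegation now the default, plain registrations like the one
    # above (and the common_shapes import they require) become redundant and
    # can be deleted. Registrations that pass extra arguments, such as
    # input_tensors_needed in _SplitShape in array_ops.py, are kept by the diff.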