-rw-r--r--  tensorflow/compiler/tests/lstm.py | 2
-rw-r--r--  tensorflow/compiler/tests/nary_ops_test.py | 4
-rw-r--r--  tensorflow/contrib/distributions/python/kernel_tests/bijector_test.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bernoulli.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijector.py | 20
-rw-r--r--  tensorflow/contrib/distributions/python/ops/categorical.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/dirichlet.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/distribution.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/distribution_util.py | 10
-rw-r--r--  tensorflow/contrib/distributions/python/ops/exponential.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/gumbel.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/laplace.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/logistic.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mixture.py | 6
-rw-r--r--  tensorflow/contrib/distributions/python/ops/multinomial.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mvn.py | 4
-rw-r--r--  tensorflow/contrib/distributions/python/ops/normal.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/onehot_categorical.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/operator_pd.py | 17
-rw-r--r--  tensorflow/contrib/distributions/python/ops/operator_pd_diag.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/shape.py | 5
-rw-r--r--  tensorflow/contrib/distributions/python/ops/student_t.py | 4
-rw-r--r--  tensorflow/contrib/distributions/python/ops/transformed_distribution.py | 12
-rw-r--r--  tensorflow/contrib/distributions/python/ops/uniform.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/wishart.py | 22
-rw-r--r--  tensorflow/contrib/factorization/python/ops/clustering_ops.py | 2
-rw-r--r--  tensorflow/contrib/factorization/python/ops/factorization_ops.py | 6
-rw-r--r--  tensorflow/contrib/factorization/python/ops/gmm.py | 3
-rw-r--r--  tensorflow/contrib/factorization/python/ops/gmm_ops.py | 14
-rw-r--r--  tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py | 13
-rw-r--r--  tensorflow/contrib/image/python/ops/image_ops.py | 2
-rw-r--r--  tensorflow/contrib/labeled_tensor/python/ops/ops.py | 2
-rw-r--r--  tensorflow/contrib/labeled_tensor/python/ops/ops_test.py | 2
-rw-r--r--  tensorflow/contrib/layers/python/layers/embedding_ops.py | 10
-rw-r--r--  tensorflow/contrib/layers/python/layers/feature_column_ops.py | 2
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py | 8
-rw-r--r--  tensorflow/contrib/layers/python/layers/target_column.py | 4
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py | 6
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/head.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 2
-rw-r--r--  tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py | 14
-rw-r--r--  tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py | 4
-rw-r--r--  tensorflow/contrib/linalg/python/kernel_tests/linear_operator_diag_test.py | 2
-rw-r--r--  tensorflow/contrib/linalg/python/ops/linear_operator_composition.py | 2
-rw-r--r--  tensorflow/contrib/linalg/python/ops/linear_operator_diag.py | 2
-rw-r--r--  tensorflow/contrib/linalg/python/ops/linear_operator_identity.py | 5
-rw-r--r--  tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py | 4
-rw-r--r--  tensorflow/contrib/metrics/python/ops/metric_ops.py | 10
-rw-r--r--  tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 10
-rw-r--r--  tensorflow/contrib/ndlstm/python/lstm2d.py | 4
-rw-r--r--  tensorflow/contrib/ndlstm/python/misc.py | 2
-rw-r--r--  tensorflow/contrib/opt/python/training/external_optimizer.py | 2
-rw-r--r--  tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py | 2
-rw-r--r--  tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py | 2
-rw-r--r--  tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py | 6
-rw-r--r--  tensorflow/contrib/rnn/python/ops/core_rnn.py | 2
-rw-r--r--  tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py | 8
-rw-r--r--  tensorflow/contrib/rnn/python/ops/gru_ops.py | 4
-rw-r--r--  tensorflow/contrib/rnn/python/ops/lstm_ops.py | 6
-rw-r--r--  tensorflow/contrib/rnn/python/ops/rnn.py | 2
-rw-r--r--  tensorflow/contrib/rnn/python/ops/rnn_cell.py | 34
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/attention_decoder_fn.py | 6
-rw-r--r--  tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py | 4
-rw-r--r--  tensorflow/contrib/slim/python/slim/nets/inception_v1.py | 18
-rw-r--r--  tensorflow/contrib/slim/python/slim/nets/inception_v2.py | 20
-rw-r--r--  tensorflow/contrib/slim/python/slim/nets/inception_v3.py | 32
-rw-r--r--  tensorflow/contrib/solvers/python/ops/lanczos.py | 2
-rw-r--r--  tensorflow/contrib/specs/python/specs_ops.py | 2
-rw-r--r--  tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py | 2
-rw-r--r--  tensorflow/contrib/tensor_forest/python/ops/data_ops.py | 4
-rw-r--r--  tensorflow/contrib/tensor_forest/python/tensor_forest.py | 6
-rw-r--r--  tensorflow/contrib/tensor_forest/python/topn.py | 8
-rw-r--r--  tensorflow/contrib/training/python/training/resample.py | 2
-rw-r--r--  tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py | 12
-rw-r--r--  tensorflow/examples/udacity/6_lstm.ipynb | 4
-rw-r--r--  tensorflow/python/framework/function_test.py | 2
-rw-r--r--  tensorflow/python/framework/tensor_util_test.py | 10
-rw-r--r--  tensorflow/python/kernel_tests/confusion_matrix_test.py | 4
-rw-r--r--  tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 6
-rw-r--r--  tensorflow/python/kernel_tests/embedding_ops_test.py | 2
-rw-r--r--  tensorflow/python/kernel_tests/large_concat_op_test.py | 2
-rw-r--r--  tensorflow/python/kernel_tests/metrics_test.py | 10
-rw-r--r--  tensorflow/python/kernel_tests/partitioned_variables_test.py | 14
-rw-r--r--  tensorflow/python/kernel_tests/reshape_op_test.py | 4
-rw-r--r--  tensorflow/python/kernel_tests/svd_op_test.py | 4
-rw-r--r--  tensorflow/python/ops/array_grad.py | 27
-rw-r--r--  tensorflow/python/ops/concat_benchmark.py | 2
-rw-r--r--  tensorflow/python/ops/control_flow_ops.py | 6
-rw-r--r--  tensorflow/python/ops/embedding_ops.py | 8
-rw-r--r--  tensorflow/python/ops/gradients_impl.py | 4
-rw-r--r--  tensorflow/python/ops/gradients_test.py | 10
-rw-r--r--  tensorflow/python/ops/image_ops_impl.py | 6
-rw-r--r--  tensorflow/python/ops/linalg_grad.py | 2
-rw-r--r--  tensorflow/python/ops/linalg_ops.py | 4
-rw-r--r--  tensorflow/python/ops/math_grad.py | 4
-rw-r--r--  tensorflow/python/ops/math_grad_test.py | 8
-rw-r--r--  tensorflow/python/ops/math_ops.py | 9
-rw-r--r--  tensorflow/python/ops/metrics_impl.py | 6
-rw-r--r--  tensorflow/python/ops/nn_grad.py | 12
-rw-r--r--  tensorflow/python/ops/nn_impl.py | 16
-rw-r--r--  tensorflow/python/ops/nn_ops.py | 11
-rw-r--r--  tensorflow/python/ops/random_ops.py | 2
-rw-r--r--  tensorflow/python/ops/rnn.py | 2
-rw-r--r--  tensorflow/python/ops/sparse_grad.py | 7
-rw-r--r--  tensorflow/python/ops/sparse_ops.py | 14
-rw-r--r--  tensorflow/python/ops/standard_ops.py | 6
-rw-r--r--  tensorflow/python/ops/variables.py | 2
-rw-r--r--  tensorflow/python/training/monitored_session.py | 2
-rw-r--r--  tensorflow/python/training/saver_test.py | 6
113 files changed, 347 insertions, 356 deletions
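
This commit mechanically renames array_ops.concat_v2 to array_ops.concat throughout the tree. Both names take the TF 1.0 argument order, with the list of tensors first and the axis second (the pre-1.0 concat took the axis first). A minimal sketch of the renamed call through the public tf.concat alias, reusing the inputs from the nary_ops_test.py hunk below:

    import tensorflow as tf

    t1 = tf.constant([[1, 2, 3], [4, 5, 6]])
    t2 = tf.constant([[7, 8, 9], [10, 11, 12]])

    # Values first, axis second: the signature concat_v2 introduced
    # and that concat now keeps.
    rows = tf.concat([t1, t2], 0)  # shape [4, 3]
    cols = tf.concat([t1, t2], 1)  # shape [2, 6]
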
diff --git a/tensorflow/compiler/tests/lstm.py b/tensorflow/compiler/tests/lstm.py
index 18166f51bf..43c469d032 100644
--- a/tensorflow/compiler/tests/lstm.py
+++ b/tensorflow/compiler/tests/lstm.py
@@ -61,7 +61,7 @@ def LSTMCell(weights, m_prev, c_prev, x, pad):
"""
# Apply weights to the input and previous hidden state.
# The matmul here is the "big" operation.
- xm = array_ops.concat_v2([x, m_prev], 1)
+ xm = array_ops.concat([x, m_prev], 1)
xmw = math_ops.matmul(xm, weights)
# Element-wise ops for the standard LSTM cell, with clipped activations.
diff --git a/tensorflow/compiler/tests/nary_ops_test.py b/tensorflow/compiler/tests/nary_ops_test.py
index 566c02e72f..d94e11b078 100644
--- a/tensorflow/compiler/tests/nary_ops_test.py
+++ b/tensorflow/compiler/tests/nary_ops_test.py
@@ -58,7 +58,7 @@ class NAryOpsTest(XLATestCase):
def testConcat(self):
self._testNAry(
- lambda x: array_ops.concat_v2(x, 0), [
+ lambda x: array_ops.concat(x, 0), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
@@ -67,7 +67,7 @@ class NAryOpsTest(XLATestCase):
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32))
self._testNAry(
- lambda x: array_ops.concat_v2(x, 1), [
+ lambda x: array_ops.concat(x, 1), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijector_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijector_test.py
index 38a3dbda12..ff2e575b06 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijector_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijector_test.py
@@ -519,7 +519,7 @@ class InlineBijectorTest(test.TestCase):
def testShapeGetters(self):
with self.test_session():
bijector = bijectors.Inline(
- forward_event_shape_fn=lambda x: array_ops.concat_v2((x, [1]), 0),
+ forward_event_shape_fn=lambda x: array_ops.concat((x, [1]), 0),
get_forward_event_shape_fn=lambda x: x.as_list() + [1],
inverse_event_shape_fn=lambda x: x[:-1],
get_inverse_event_shape_fn=lambda x: x[:-1],
diff --git a/tensorflow/contrib/distributions/python/ops/bernoulli.py b/tensorflow/contrib/distributions/python/ops/bernoulli.py
index d2ede5e967..d538781b63 100644
--- a/tensorflow/contrib/distributions/python/ops/bernoulli.py
+++ b/tensorflow/contrib/distributions/python/ops/bernoulli.py
@@ -118,7 +118,7 @@ class Bernoulli(distribution.Distribution):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- new_shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
+ new_shape = array_ops.concat(([n], self.batch_shape()), 0)
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=self.p.dtype)
sample = math_ops.less(uniform, self.p)
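
The _sample_n changes in the distribution classes all follow one shape idiom: prepend the requested sample count n to the distribution's dynamic batch shape before drawing. A self-contained sketch with toy values, using the current tf.random.uniform alias (the p = 0.5 and the shapes here are illustrative, not from the source):

    import tensorflow as tf

    n = 7                              # number of samples requested
    batch_shape = tf.constant([2, 3])  # dynamic batch shape of the distribution
    # Prepend [n] so every batch member gets n independent draws.
    new_shape = tf.concat(([n], batch_shape), 0)  # [7, 2, 3]
    uniform = tf.random.uniform(new_shape)
    sample = tf.cast(uniform < 0.5, tf.int32)     # Bernoulli(0.5) draws
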
diff --git a/tensorflow/contrib/distributions/python/ops/bijector.py b/tensorflow/contrib/distributions/python/ops/bijector.py
index 7a089bb53b..2d1cc1d2b4 100644
--- a/tensorflow/contrib/distributions/python/ops/bijector.py
+++ b/tensorflow/contrib/distributions/python/ops/bijector.py
@@ -1378,8 +1378,7 @@ class _TriLPlusVDVTLightweightOperatorPD(object):
id_shape = v_shape[:-2] + [v_shape[-1], v_shape[-1]]
else:
v_shape = array_ops.shape(v)
- id_shape = array_ops.concat_v2(
- [v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
+ id_shape = array_ops.concat([v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
self._d = operator_pd_identity.OperatorPDIdentity(
id_shape, v.dtype, verify_pd=self.validate_args)
self._d_inv = self._d
@@ -1740,7 +1739,7 @@ class Affine(Bijector):
return identity_multiplier
# Infer the shape from the V and D.
v_shape = array_ops.shape(perturb_factor)
- identity_shape = array_ops.concat_v2((v_shape[:-1], (v_shape[-2],)), 0)
+ identity_shape = array_ops.concat((v_shape[:-1], (v_shape[-2],)), 0)
scaled_identity = operator_pd_identity.OperatorPDIdentity(
identity_shape,
perturb_factor.dtype.base_dtype,
@@ -1796,9 +1795,10 @@ class Affine(Bijector):
math_ops.equal(array_ops.rank(matrix), min_rank),
math_ops.equal(event_ndims, 1))
left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
- pad = array_ops.concat_v2([
- array_ops.ones([left], dtype=dtypes.int32),
- array_ops.shape(matrix)], 0)
+ pad = array_ops.concat(
+ [array_ops.ones(
+ [left], dtype=dtypes.int32), array_ops.shape(matrix)],
+ 0)
return array_ops.reshape(matrix, pad)
def _infer_batch_ndims(self):
@@ -2221,7 +2221,7 @@ class SoftmaxCentered(Bijector):
ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
else array_ops.rank(y))
y = array_ops.pad(y,
- paddings=array_ops.concat_v2(
+ paddings=array_ops.concat(
(array_ops.zeros(
(ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
0))
@@ -2265,14 +2265,12 @@ class SoftmaxCentered(Bijector):
depth=ndims,
on_value=shape[-1]-np.array(1, dtype=shape.dtype),
dtype=shape.dtype)
- size = array_ops.concat_v2(
- (shape[:-1], np.asarray(
- [1], dtype=shape.dtype)), 0)
+ size = array_ops.concat((shape[:-1], np.asarray([1], dtype=shape.dtype)), 0)
log_normalization = -array_ops.strided_slice(x, begin, begin + size)
# Here we slice out all but the last coordinate; see above for idea.
begin = array_ops.zeros_like(shape)
- size = array_ops.concat_v2((shape[:-1], [shape[-1] - 1]), 0)
+ size = array_ops.concat((shape[:-1], [shape[-1] - 1]), 0)
x = array_ops.strided_slice(x, begin, begin + size)
x += log_normalization
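
The SoftmaxCentered hunks above build an ndims-by-2 paddings matrix dynamically: zero padding on every leading axis, one trailing element on the last. A standalone sketch of just that construction (the rank-2 input is a toy value):

    import tensorflow as tf

    y = tf.zeros([2, 3])
    ndims = 2
    # Each row of `paddings` is (pad_before, pad_after) for one axis:
    # nothing on the leading axes, one extra slot at the end of the last.
    paddings = tf.concat(
        (tf.zeros((ndims - 1, 2), dtype=tf.int32), [[0, 1]]), 0)
    padded = tf.pad(y, paddings=paddings)  # shape [2, 4]
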
diff --git a/tensorflow/contrib/distributions/python/ops/categorical.py b/tensorflow/contrib/distributions/python/ops/categorical.py
index a3391fd82a..feca611d00 100644
--- a/tensorflow/contrib/distributions/python/ops/categorical.py
+++ b/tensorflow/contrib/distributions/python/ops/categorical.py
@@ -189,7 +189,7 @@ class Categorical(distribution.Distribution):
samples = math_ops.cast(samples, self.dtype)
ret = array_ops.reshape(
array_ops.transpose(samples),
- array_ops.concat_v2(([n], self.batch_shape()), 0))
+ array_ops.concat(([n], self.batch_shape()), 0))
return ret
def _log_prob(self, k):
diff --git a/tensorflow/contrib/distributions/python/ops/dirichlet.py b/tensorflow/contrib/distributions/python/ops/dirichlet.py
index 518c12d4fa..027a778ed7 100644
--- a/tensorflow/contrib/distributions/python/ops/dirichlet.py
+++ b/tensorflow/contrib/distributions/python/ops/dirichlet.py
@@ -238,7 +238,7 @@ class Dirichlet(distribution.Distribution):
math_ops.cast(self.event_shape()[0], self.dtype)))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
- shape = array_ops.concat_v2((self.batch_shape(), self.event_shape()), 0)
+ shape = array_ops.concat((self.batch_shape(), self.event_shape()), 0)
return array_ops.where(
math_ops.greater(self.alpha, 1.),
mode,
diff --git a/tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py b/tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py
index 6e16eb4ce4..839904b47c 100644
--- a/tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py
+++ b/tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py
@@ -235,7 +235,7 @@ class DirichletMultinomial(distribution.Distribution):
seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
reduction_indices=-2)
- final_shape = array_ops.concat_v2([[n], self.batch_shape(), [k]], 0)
+ final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
return array_ops.reshape(x, final_shape)
@distribution_util.AppendDocstring(_dirichlet_multinomial_prob_note)
diff --git a/tensorflow/contrib/distributions/python/ops/distribution.py b/tensorflow/contrib/distributions/python/ops/distribution.py
index 74d5319613..99198a2877 100644
--- a/tensorflow/contrib/distributions/python/ops/distribution.py
+++ b/tensorflow/contrib/distributions/python/ops/distribution.py
@@ -580,7 +580,7 @@ class Distribution(_BaseDistribution):
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **condition_kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
- final_shape = array_ops.concat_v2([sample_shape, batch_event_shape], 0)
+ final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
diff --git a/tensorflow/contrib/distributions/python/ops/distribution_util.py b/tensorflow/contrib/distributions/python/ops/distribution_util.py
index 671aa6a513..71e42bc214 100644
--- a/tensorflow/contrib/distributions/python/ops/distribution_util.py
+++ b/tensorflow/contrib/distributions/python/ops/distribution_util.py
@@ -126,10 +126,10 @@ def same_dynamic_shape(a, b):
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
lambda: math_ops.reduce_all(math_ops.equal(
- array_ops.concat_v2((
+ array_ops.concat((
array_ops.shape(a),
array_ops.shape(b)), 0),
- array_ops.concat_v2((
+ array_ops.concat((
array_ops.shape(b),
array_ops.shape(a)), 0))),
lambda: constant_op.constant(False))
@@ -371,7 +371,7 @@ def rotate_transpose(x, shift, name="rotate_transpose"):
ndims - math_ops.mod(shift, ndims))
first = math_ops.range(0, shift)
last = math_ops.range(shift, ndims)
- perm = array_ops.concat_v2((last, first), 0)
+ perm = array_ops.concat((last, first), 0)
return array_ops.transpose(x, perm=perm)
@@ -427,7 +427,7 @@ def pick_vector(cond,
false_vector.name, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(
- array_ops.concat_v2((true_vector, false_vector), 0),
+ array_ops.concat((true_vector, false_vector), 0),
[array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])
@@ -558,7 +558,7 @@ def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
# Gather up, reshape, and return.
y = array_ops.reshape(x, [-1, d])
y = array_ops.gather_nd(y, idx)
- y = array_ops.reshape(y, array_ops.concat_v2([batch_shape, [n, n]], 0))
+ y = array_ops.reshape(y, array_ops.concat([batch_shape, [n, n]], 0))
y = array_ops.matrix_band_part(y, -1, 0)
y.set_shape(y.get_shape().merge_with(final_shape))
return y
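
rotate_transpose above builds a cyclic permutation by concatenating two ranges instead of manipulating Python lists, so it also works when the rank is only known at graph-run time. A sketch for a right-rotation by one of a rank-3 tensor (the concrete numbers are illustrative):

    import tensorflow as tf

    x = tf.zeros([2, 3, 4])
    ndims = 3
    # A right shift of 1 is a left shift of ndims - 1 = 2.
    shift = ndims - (1 % ndims)           # 2
    first = tf.range(0, shift)            # [0, 1]
    last = tf.range(shift, ndims)         # [2]
    perm = tf.concat((last, first), 0)    # [2, 0, 1]
    rotated = tf.transpose(x, perm=perm)  # shape [4, 2, 3]
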
diff --git a/tensorflow/contrib/distributions/python/ops/exponential.py b/tensorflow/contrib/distributions/python/ops/exponential.py
index 31bc853bf9..cd6e5c2d1a 100644
--- a/tensorflow/contrib/distributions/python/ops/exponential.py
+++ b/tensorflow/contrib/distributions/python/ops/exponential.py
@@ -89,7 +89,7 @@ class Exponential(gamma.Gamma):
return self._lam
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self._lam)), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self._lam)), 0)
# Sample uniformly-at-random from the open-interval (0, 1).
sampled = random_ops.random_uniform(
shape,
diff --git a/tensorflow/contrib/distributions/python/ops/gumbel.py b/tensorflow/contrib/distributions/python/ops/gumbel.py
index 7ddc1cfc20..ddac5f4e75 100644
--- a/tensorflow/contrib/distributions/python/ops/gumbel.py
+++ b/tensorflow/contrib/distributions/python/ops/gumbel.py
@@ -158,7 +158,7 @@ class _Gumbel(distribution.Distribution):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
np_dtype = self.dtype.as_numpy_dtype()
minval = np.nextafter(np_dtype(0), np_dtype(1))
uniform = random_ops.random_uniform(shape=shape,
diff --git a/tensorflow/contrib/distributions/python/ops/laplace.py b/tensorflow/contrib/distributions/python/ops/laplace.py
index 37355a1282..ec0adc2142 100644
--- a/tensorflow/contrib/distributions/python/ops/laplace.py
+++ b/tensorflow/contrib/distributions/python/ops/laplace.py
@@ -125,7 +125,7 @@ class Laplace(distribution.Distribution):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
+ shape = array_ops.concat(([n], self.batch_shape()), 0)
# Sample uniformly-at-random from the open-interval (-1, 1).
uniform_samples = random_ops.random_uniform(
shape=shape,
diff --git a/tensorflow/contrib/distributions/python/ops/logistic.py b/tensorflow/contrib/distributions/python/ops/logistic.py
index f066874d4f..9d68eb7dfd 100644
--- a/tensorflow/contrib/distributions/python/ops/logistic.py
+++ b/tensorflow/contrib/distributions/python/ops/logistic.py
@@ -157,7 +157,7 @@ class _Logistic(distribution.Distribution):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
np_dtype = self.dtype.as_numpy_dtype()
minval = np.nextafter(np_dtype(0), np_dtype(1))
uniform = random_ops.random_uniform(shape=shape,
diff --git a/tensorflow/contrib/distributions/python/ops/mixture.py b/tensorflow/contrib/distributions/python/ops/mixture.py
index 0e98e9e3b0..16f7fa710b 100644
--- a/tensorflow/contrib/distributions/python/ops/mixture.py
+++ b/tensorflow/contrib/distributions/python/ops/mixture.py
@@ -330,7 +330,7 @@ class Mixture(distribution.Distribution):
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
- array_ops.concat_v2(([n_class * batch_size], event_shape), 0))
+ array_ops.concat(([n_class * batch_size], event_shape), 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
@@ -341,8 +341,8 @@ class Mixture(distribution.Distribution):
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
- array_ops.concat_v2((samples_shape,
- self.event_shape()), 0))
+ array_ops.concat((samples_shape,
+ self.event_shape()), 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.get_event_shape()))
diff --git a/tensorflow/contrib/distributions/python/ops/multinomial.py b/tensorflow/contrib/distributions/python/ops/multinomial.py
index 8ff2724f5e..9f366016b2 100644
--- a/tensorflow/contrib/distributions/python/ops/multinomial.py
+++ b/tensorflow/contrib/distributions/python/ops/multinomial.py
@@ -229,7 +229,7 @@ class Multinomial(distribution.Distribution):
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
reduction_indices=-2) # shape: [B, n, k]
x = array_ops.transpose(x, perm=[1, 0, 2])
- final_shape = array_ops.concat_v2([[n], self.batch_shape(), [k]], 0)
+ final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
return array_ops.reshape(x, final_shape)
@distribution_util.AppendDocstring(_multinomial_prob_note)
diff --git a/tensorflow/contrib/distributions/python/ops/mvn.py b/tensorflow/contrib/distributions/python/ops/mvn.py
index 0595ca89d8..9786e9b812 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn.py
@@ -229,7 +229,7 @@ class _MultivariateNormalOperatorPD(distribution.Distribution):
def _sample_n(self, n, seed=None):
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
- shape = array_ops.concat_v2([self._cov.vector_shape(), [n]], 0)
+ shape = array_ops.concat([self._cov.vector_shape(), [n]], 0)
white_samples = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
@@ -239,7 +239,7 @@ class _MultivariateNormalOperatorPD(distribution.Distribution):
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
- perm = array_ops.concat_v2(
+ perm = array_ops.concat(
(array_ops.stack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)
diff --git a/tensorflow/contrib/distributions/python/ops/normal.py b/tensorflow/contrib/distributions/python/ops/normal.py
index 124f97f2c4..7fbbf22455 100644
--- a/tensorflow/contrib/distributions/python/ops/normal.py
+++ b/tensorflow/contrib/distributions/python/ops/normal.py
@@ -157,7 +157,7 @@ class Normal(distribution.Distribution):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
sampled = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self.mu.dtype, seed=seed)
return sampled * self.sigma + self.mu
diff --git a/tensorflow/contrib/distributions/python/ops/onehot_categorical.py b/tensorflow/contrib/distributions/python/ops/onehot_categorical.py
index 5d5ba2490c..1eb3de0e81 100644
--- a/tensorflow/contrib/distributions/python/ops/onehot_categorical.py
+++ b/tensorflow/contrib/distributions/python/ops/onehot_categorical.py
@@ -186,7 +186,7 @@ class _OneHotCategorical(distribution.Distribution):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
- sample_shape = array_ops.concat_v2(([n], array_ops.shape(self.logits)), 0)
+ sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
logits = self.logits
if logits.get_shape().ndims == 2:
logits_2d = logits
diff --git a/tensorflow/contrib/distributions/python/ops/operator_pd.py b/tensorflow/contrib/distributions/python/ops/operator_pd.py
index 24147ae754..5471db21ed 100644
--- a/tensorflow/contrib/distributions/python/ops/operator_pd.py
+++ b/tensorflow/contrib/distributions/python/ops/operator_pd.py
@@ -428,7 +428,7 @@ class OperatorPDBase(object):
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
- return array_ops.concat_v2(
+ return array_ops.concat(
(self.batch_shape(), [self.vector_space_dimension()]), 0)
def vector_space_dimension(self, name="vector_space_dimension"):
@@ -703,12 +703,11 @@ def _flip_matrix_to_vector_dynamic(mat, batch_shape):
"""Flip matrix to vector with dynamic shapes."""
mat_rank = array_ops.rank(mat)
k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
- final_shape = array_ops.concat_v2((batch_shape, [k]), 0)
+ final_shape = array_ops.concat((batch_shape, [k]), 0)
# mat.shape = matrix_batch_shape + [k, M]
# Permutation corresponding to [M] + matrix_batch_shape + [k]
- perm = array_ops.concat_v2(
- ([mat_rank - 1], math_ops.range(0, mat_rank - 1)), 0)
+ perm = array_ops.concat(([mat_rank - 1], math_ops.range(0, mat_rank - 1)), 0)
mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
return vector
@@ -779,12 +778,12 @@ def _flip_vector_to_matrix_dynamic(vec, batch_shape):
# If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
k = array_ops.gather(vec_shape, vec_rank - 1)
- new_shape = array_ops.concat_v2((batch_shape, [k], condensed_shape), 0)
+ new_shape = array_ops.concat((batch_shape, [k], condensed_shape), 0)
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
- perm = array_ops.concat_v2(
- (math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
+ perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
+ 0)
return array_ops.transpose(vec, perm=perm)
x_flipped = control_flow_ops.cond(
@@ -817,8 +816,8 @@ def _flip_vector_to_matrix_static(vec, batch_shape):
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
- perm = array_ops.concat_v2(
- (math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
+ perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
+ 0)
return array_ops.transpose(vec, perm=perm)
if 0 < m:
diff --git a/tensorflow/contrib/distributions/python/ops/operator_pd_diag.py b/tensorflow/contrib/distributions/python/ops/operator_pd_diag.py
index 8cbcca5daf..9d7d2a3621 100644
--- a/tensorflow/contrib/distributions/python/ops/operator_pd_diag.py
+++ b/tensorflow/contrib/distributions/python/ops/operator_pd_diag.py
@@ -82,7 +82,7 @@ class OperatorPDDiagBase(operator_pd.OperatorPDBase):
def _shape(self):
d_shape = array_ops.shape(self._diag)
k = array_ops.gather(d_shape, array_ops.size(d_shape) - 1)
- return array_ops.concat_v2((d_shape, [k]), 0)
+ return array_ops.concat((d_shape, [k]), 0)
@abc.abstractmethod
def _batch_log_det(self):
diff --git a/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py b/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py
index 90b1ae7d61..9f494e9e3d 100644
--- a/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py
+++ b/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py
@@ -147,7 +147,7 @@ class OperatorPDSqrtVDVTUpdate(operator_pd.OperatorPDBase):
v_rank = array_ops.rank(v)
v_batch_shape = array_ops.strided_slice(v_shape, [0], [v_rank - 2])
r = array_ops.gather(v_shape, v_rank - 1) # Last dim of v
- id_shape = array_ops.concat_v2((v_batch_shape, [r, r]), 0)
+ id_shape = array_ops.concat((v_batch_shape, [r, r]), 0)
return operator_pd_identity.OperatorPDIdentity(
id_shape, v.dtype, verify_pd=self._verify_pd)
diff --git a/tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py b/tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py
index 11742a16dc..e5aabe86a0 100644
--- a/tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py
+++ b/tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py
@@ -242,7 +242,7 @@ class _ExpRelaxedOneHotCategorical(distribution.Distribution):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
- sample_shape = array_ops.concat_v2(([n], array_ops.shape(self.logits)), 0)
+ sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
logits = self.logits * array_ops.ones(sample_shape)
if logits.get_shape().ndims == 2:
logits_2d = logits
diff --git a/tensorflow/contrib/distributions/python/ops/shape.py b/tensorflow/contrib/distributions/python/ops/shape.py
index 7a30218e95..cadbd010dd 100644
--- a/tensorflow/contrib/distributions/python/ops/shape.py
+++ b/tensorflow/contrib/distributions/python/ops/shape.py
@@ -387,7 +387,7 @@ class _DistributionShape(object):
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
- new_shape = array_ops.concat_v2([[-1], batch_shape, event_shape], 0)
+ new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
x = distribution_util.rotate_transpose(x, shift=-1)
return x, sample_shape
@@ -437,8 +437,7 @@ class _DistributionShape(object):
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
2, 1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
- new_shape = array_ops.concat_v2(
- (sample_shape, batch_shape, event_shape), 0)
+ new_shape = array_ops.concat((sample_shape, batch_shape, event_shape), 0)
x = array_ops.reshape(x, shape=new_shape)
return x
diff --git a/tensorflow/contrib/distributions/python/ops/student_t.py b/tensorflow/contrib/distributions/python/ops/student_t.py
index 17670c8572..0f32472252 100644
--- a/tensorflow/contrib/distributions/python/ops/student_t.py
+++ b/tensorflow/contrib/distributions/python/ops/student_t.py
@@ -196,7 +196,7 @@ class StudentT(distribution.Distribution):
# Y = X / sqrt(Z / df)
# then:
# Y ~ StudentT(df).
- shape = array_ops.concat_v2([[n], self.batch_shape()], 0)
+ shape = array_ops.concat([[n], self.batch_shape()], 0)
normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
gamma_sample = random_ops.random_gamma(
@@ -235,7 +235,7 @@ class StudentT(distribution.Distribution):
def _entropy(self):
v = array_ops.ones(self.batch_shape(), dtype=self.dtype)[..., None]
u = v * self.df[..., None]
- beta_arg = array_ops.concat_v2([u, v], -1) / 2.
+ beta_arg = array_ops.concat([u, v], -1) / 2.
return (math_ops.log(math_ops.abs(self.sigma)) +
0.5 * math_ops.log(self.df) +
special_math_ops.lbeta(beta_arg) +
diff --git a/tensorflow/contrib/distributions/python/ops/transformed_distribution.py b/tensorflow/contrib/distributions/python/ops/transformed_distribution.py
index 624b68e01f..f7c2206e2b 100644
--- a/tensorflow/contrib/distributions/python/ops/transformed_distribution.py
+++ b/tensorflow/contrib/distributions/python/ops/transformed_distribution.py
@@ -87,7 +87,7 @@ def _concat_vectors(*args):
"""Convenience function which concatenates input vectors."""
args_ = [_static_value(x) for x in args]
if any(x_ is None for x_ in args_):
- return array_ops.concat_v2(args, 0)
+ return array_ops.concat(args, 0)
return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])
@@ -507,13 +507,15 @@ class TransformedDistribution(distributions.Distribution):
entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
dtype=entropy.dtype.base_dtype)
if self._is_maybe_batch_override:
- new_shape = array_ops.concat_v2([
+ new_shape = array_ops.concat([
_ones_like(self._override_batch_shape),
- self.distribution.batch_shape()], 0)
+ self.distribution.batch_shape()
+ ], 0)
entropy = array_ops.reshape(entropy, new_shape)
- multiples = array_ops.concat_v2([
+ multiples = array_ops.concat([
self._override_batch_shape,
- _ones_like(self.distribution.batch_shape())], 0)
+ _ones_like(self.distribution.batch_shape())
+ ], 0)
entropy = array_ops.tile(entropy, multiples)
dummy = 0.
return entropy - self.bijector.inverse_log_det_jacobian(dummy)
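
The entropy hunk above broadcasts a per-batch entropy over user-supplied override batch dimensions with a reshape-then-tile pair: the reshape inserts size-1 axes where the override dims go, and the tile repeats along exactly those axes. A toy sketch (all shapes are illustrative):

    import tensorflow as tf

    entropy = tf.constant([0.5, 1.5])        # shape [2], one value per batch
    override_batch_shape = tf.constant([3])
    dist_batch_shape = tf.constant([2])

    new_shape = tf.concat([tf.ones_like(override_batch_shape),
                           dist_batch_shape], 0)                # [1, 2]
    entropy = tf.reshape(entropy, new_shape)
    multiples = tf.concat([override_batch_shape,
                           tf.ones_like(dist_batch_shape)], 0)  # [3, 1]
    entropy = tf.tile(entropy, multiples)    # shape [3, 2]
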
diff --git a/tensorflow/contrib/distributions/python/ops/uniform.py b/tensorflow/contrib/distributions/python/ops/uniform.py
index ae9be623e3..6c640709b2 100644
--- a/tensorflow/contrib/distributions/python/ops/uniform.py
+++ b/tensorflow/contrib/distributions/python/ops/uniform.py
@@ -136,7 +136,7 @@ class Uniform(distribution.Distribution):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
+ shape = array_ops.concat(([n], self.batch_shape()), 0)
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
diff --git a/tensorflow/contrib/distributions/python/ops/wishart.py b/tensorflow/contrib/distributions/python/ops/wishart.py
index 401b33788f..c9326f4f4d 100644
--- a/tensorflow/contrib/distributions/python/ops/wishart.py
+++ b/tensorflow/contrib/distributions/python/ops/wishart.py
@@ -198,7 +198,7 @@ class _WishartOperatorPD(distribution.Distribution):
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
- shape = array_ops.concat_v2(((n,), batch_shape, event_shape), 0)
+ shape = array_ops.concat(((n,), batch_shape, event_shape), 0)
# Complexity: O(nbk^2)
x = random_ops.random_normal(shape=shape,
@@ -226,9 +226,9 @@ class _WishartOperatorPD(distribution.Distribution):
# Make batch-op ready.
# Complexity: O(nbk^2)
- perm = array_ops.concat_v2((math_ops.range(1, ndims), (0,)), 0)
+ perm = array_ops.concat((math_ops.range(1, ndims), (0,)), 0)
x = array_ops.transpose(x, perm)
- shape = array_ops.concat_v2((batch_shape, (event_shape[0], -1)), 0)
+ shape = array_ops.concat((batch_shape, (event_shape[0], -1)), 0)
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
@@ -239,9 +239,9 @@ class _WishartOperatorPD(distribution.Distribution):
# Undo make batch-op ready.
# Complexity: O(nbk^2)
- shape = array_ops.concat_v2((batch_shape, event_shape, (n,)), 0)
+ shape = array_ops.concat((batch_shape, event_shape, (n,)), 0)
x = array_ops.reshape(x, shape)
- perm = array_ops.concat_v2(((ndims - 1,), math_ops.range(0, ndims - 1)), 0)
+ perm = array_ops.concat(((ndims - 1,), math_ops.range(0, ndims - 1)), 0)
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
@@ -278,10 +278,10 @@ class _WishartOperatorPD(distribution.Distribution):
# Complexity: O(nbk^2) since transpose must access every element.
scale_sqrt_inv_x_sqrt = x_sqrt
- perm = array_ops.concat_v2((math_ops.range(sample_ndims, ndims),
- math_ops.range(0, sample_ndims)), 0)
+ perm = array_ops.concat((math_ops.range(sample_ndims, ndims),
+ math_ops.range(0, sample_ndims)), 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
- shape = array_ops.concat_v2(
+ shape = array_ops.concat(
(batch_shape, (math_ops.cast(
self.dimension, dtype=dtypes.int32), -1)),
0)
@@ -296,10 +296,10 @@ class _WishartOperatorPD(distribution.Distribution):
# Undo make batch-op ready.
# Complexity: O(nbk^2)
- shape = array_ops.concat_v2((batch_shape, event_shape, sample_shape), 0)
+ shape = array_ops.concat((batch_shape, event_shape, sample_shape), 0)
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
- perm = array_ops.concat_v2((math_ops.range(ndims - sample_ndims, ndims),
- math_ops.range(0, ndims - sample_ndims)), 0)
+ perm = array_ops.concat((math_ops.range(ndims - sample_ndims, ndims),
+ math_ops.range(0, ndims - sample_ndims)), 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
# Write V = SS', X = LL'. Then:
diff --git a/tensorflow/contrib/factorization/python/ops/clustering_ops.py b/tensorflow/contrib/factorization/python/ops/clustering_ops.py
index dcade6ae8a..72b1326f47 100644
--- a/tensorflow/contrib/factorization/python/ops/clustering_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/clustering_ops.py
@@ -359,7 +359,7 @@ class KMeans(object):
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
- broadcast_shape = array_ops.concat_v2(
+ broadcast_shape = array_ops.concat(
[
array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
diff --git a/tensorflow/contrib/factorization/python/ops/factorization_ops.py b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
index c1b0fa17c9..25cc66ca81 100644
--- a/tensorflow/contrib/factorization/python/ops/factorization_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
@@ -788,10 +788,10 @@ class WALSModel(object):
col_shape = [num_rows]
right = embedding_ops.embedding_lookup(
right_factors, gather_indices, partition_strategy="div")
- new_sp_indices = array_ops.concat_v2([row_ids, col_ids], 1)
- new_sp_shape = (array_ops.concat_v2([row_shape, col_shape], 0) if
+ new_sp_indices = array_ops.concat([row_ids, col_ids], 1)
+ new_sp_shape = (array_ops.concat([row_shape, col_shape], 0) if
transpose_input else
- array_ops.concat_v2([col_shape, row_shape], 0))
+ array_ops.concat([col_shape, row_shape], 0))
new_sp_input = sparse_tensor.SparseTensor(
indices=new_sp_indices,
values=sp_input.values,
diff --git a/tensorflow/contrib/factorization/python/ops/gmm.py b/tensorflow/contrib/factorization/python/ops/gmm.py
index 86450d4bbd..36074c3fac 100644
--- a/tensorflow/contrib/factorization/python/ops/gmm.py
+++ b/tensorflow/contrib/factorization/python/ops/gmm.py
@@ -191,8 +191,7 @@ class GMM(estimator_lib.Estimator, TransformerMixin):
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
- return array_ops.concat_v2([features[k] for k in sorted(features.keys())],
- 1)
+ return array_ops.concat([features[k] for k in sorted(features.keys())], 1)
return features
def _get_train_ops(self, features, _):
diff --git a/tensorflow/contrib/factorization/python/ops/gmm_ops.py b/tensorflow/contrib/factorization/python/ops/gmm_ops.py
index a94bda98cb..e795c0aac7 100644
--- a/tensorflow/contrib/factorization/python/ops/gmm_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/gmm_ops.py
@@ -346,9 +346,9 @@ class GmmAlgorithm(object):
1)
self._w_mul_x.append(w_mul_x)
# Partial covariances.
- x = array_ops.concat_v2([shard for _ in range(self._num_classes)], 0)
+ x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
x_trans = array_ops.transpose(x, perm=[0, 2, 1])
- x_mul_w = array_ops.concat_v2([
+ x_mul_w = array_ops.concat([
array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
for k in range(self._num_classes)
], 0)
@@ -395,7 +395,7 @@ class GmmAlgorithm(object):
elif self._covariance_type == DIAG_COVARIANCE:
new_covs.append(
array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
- new_covs = array_ops.concat_v2(new_covs, 0)
+ new_covs = array_ops.concat(new_covs, 0)
if 'c' in self._params:
# Train operations don't need to take care of the means
# because covariances already depend on it.
@@ -430,14 +430,14 @@ class GmmAlgorithm(object):
diff, perm=[0, 2, 1]))))
self._all_scores.append(
array_ops.reshape(
- array_ops.concat_v2(all_scores, 1),
+ array_ops.concat(all_scores, 1),
array_ops.stack([self._num_examples, self._num_classes])))
# Distance to the associated class.
- self._all_scores = array_ops.concat_v2(self._all_scores, 0)
- assignments = array_ops.concat_v2(self.assignments(), 0)
+ self._all_scores = array_ops.concat(self._all_scores, 0)
+ assignments = array_ops.concat(self.assignments(), 0)
rows = math_ops.to_int64(math_ops.range(0, self._num_examples))
- indices = array_ops.concat_v2(
+ indices = array_ops.concat(
[array_ops.expand_dims(rows, 1), array_ops.expand_dims(assignments, 1)],
1)
self._scores = array_ops.gather_nd(self._all_scores, indices)
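
The last gmm_ops.py hunk pairs each example's row index with its assigned class so that gather_nd pulls exactly one score per row. A self-contained sketch with made-up scores:

    import tensorflow as tf

    all_scores = tf.constant([[0.1, 0.9],
                              [0.8, 0.2],
                              [0.4, 0.6]])   # [num_examples, num_classes]
    assignments = tf.constant([1, 0, 1], dtype=tf.int64)
    rows = tf.range(0, 3, dtype=tf.int64)
    # One (row, class) pair per example: [[0, 1], [1, 0], [2, 1]].
    indices = tf.concat([tf.expand_dims(rows, 1),
                         tf.expand_dims(assignments, 1)], 1)
    scores = tf.gather_nd(all_scores, indices)  # [0.9, 0.8, 0.6]
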
diff --git a/tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py b/tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py
index aef4ce3fdb..b35d0df98c 100644
--- a/tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py
+++ b/tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py
@@ -192,13 +192,14 @@ class GridRNNCell(rnn.RNNCell):
output_tensors = [new_output[i] for i in self._config.outputs]
output = array_ops.zeros(
- [0, 0], dtype) if len(output_tensors) == 0 else array_ops.concat_v2(
+ [0, 0], dtype) if len(output_tensors) == 0 else array_ops.concat(
output_tensors, 1)
state_tensors = [new_state[i] for i in self._config.recurrents]
states = array_ops.zeros(
- [0, 0], dtype) if len(state_tensors) == 0 else array_ops.concat_v2(
- state_tensors, 1)
+ [0, 0],
+ dtype) if len(state_tensors) == 0 else array_ops.concat(state_tensors,
+ 1)
return output, states
@@ -429,7 +430,7 @@ def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state,
for d in conf.dims[:-1]:
ls_cell_inputs[d.idx] = new_output[d.idx] if new_output[
d.idx] is not None else m_prev[d.idx]
- cell_inputs = array_ops.concat_v2(ls_cell_inputs, 1)
+ cell_inputs = array_ops.concat(ls_cell_inputs, 1)
else:
cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0],
m_prev[0].dtype)
@@ -439,7 +440,7 @@ def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state,
for i in dim_indices:
d = conf.dims[i]
if d.non_recurrent_fn:
- linear_args = array_ops.concat_v2(
+ linear_args = array_ops.concat(
[cell_inputs, last_dim_output],
1) if conf.num_dims > 1 else last_dim_output
with vs.variable_scope('non_recurrent' if conf.tied else
@@ -454,7 +455,7 @@ def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state,
layers.initializers.xavier_initializer)
else:
if c_prev[i] is not None:
- cell_state = array_ops.concat_v2([c_prev[i], last_dim_output], 1)
+ cell_state = array_ops.concat([c_prev[i], last_dim_output], 1)
else:
# for GRU/RNN, the state is just the previous output
cell_state = last_dim_output
diff --git a/tensorflow/contrib/image/python/ops/image_ops.py b/tensorflow/contrib/image/python/ops/image_ops.py
index 23e43e964e..6db3f61a9e 100644
--- a/tensorflow/contrib/image/python/ops/image_ops.py
+++ b/tensorflow/contrib/image/python/ops/image_ops.py
@@ -80,7 +80,7 @@ def rotate(images, angles):
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
- transforms = array_ops.concat_v2(
+ transforms = array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
diff --git a/tensorflow/contrib/labeled_tensor/python/ops/ops.py b/tensorflow/contrib/labeled_tensor/python/ops/ops.py
index f4b83ed5e6..6b8514fe62 100644
--- a/tensorflow/contrib/labeled_tensor/python/ops/ops.py
+++ b/tensorflow/contrib/labeled_tensor/python/ops/ops.py
@@ -198,7 +198,7 @@ def concat(labeled_tensors, axis_name, name=None):
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
- concat_tensor = array_ops.concat_v2(tensors, concat_dimension, name=scope)
+ concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
diff --git a/tensorflow/contrib/labeled_tensor/python/ops/ops_test.py b/tensorflow/contrib/labeled_tensor/python/ops/ops_test.py
index 87e27ca85f..bbe77f9fef 100644
--- a/tensorflow/contrib/labeled_tensor/python/ops/ops_test.py
+++ b/tensorflow/contrib/labeled_tensor/python/ops/ops_test.py
@@ -928,7 +928,7 @@ class WhereTest(Base):
where_lt = ops.where(condition, x, y)
golden_lt = core.LabeledTensor(
- array_ops.concat_v2([array_ops.ones(3), array_ops.zeros(2)], 0), ['x'])
+ array_ops.concat([array_ops.ones(3), array_ops.zeros(2)], 0), ['x'])
self.assertLabeledTensorsEqual(where_lt, golden_lt)
def test_mismatched_axes(self):
diff --git a/tensorflow/contrib/layers/python/layers/embedding_ops.py b/tensorflow/contrib/layers/python/layers/embedding_ops.py
index 6d5f5f8474..f0ed31d1d1 100644
--- a/tensorflow/contrib/layers/python/layers/embedding_ops.py
+++ b/tensorflow/contrib/layers/python/layers/embedding_ops.py
@@ -159,7 +159,7 @@ def safe_embedding_lookup_sparse(embedding_weights,
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
- array_ops.concat_v2([
+ array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
@@ -307,7 +307,7 @@ def _sampled_scattered_embedding_lookup(
math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
sampled_candidates_shape = array_ops.shape(sampled_candidates)
dimension_tensor = array_ops.reshape(dimension, shape=[1,])
- expected_shape = array_ops.concat_v2([values_shape, dimension_tensor], 0)
+ expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
with ops.control_dependencies([control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(sampled_candidates_shape,
expected_shape)),
@@ -346,8 +346,8 @@ def _sampled_scattered_embedding_lookup(
result = embedding_ops.embedding_lookup(
params, ids, partition_strategy="div", validate_indices=False)
- return array_ops.reshape(
- result, array_ops.concat_v2([values_shape, [dimension]], 0))
+ return array_ops.reshape(result,
+ array_ops.concat([values_shape, [dimension]], 0))
def scattered_embedding_lookup_sparse(params,
@@ -461,7 +461,7 @@ def embedding_lookup_unique(params, ids, name=None):
unique_ids, idx = array_ops.unique(ids_flat)
unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
embeds_flat = array_ops.gather(unique_embeddings, idx)
- embed_shape = array_ops.concat_v2(
+ embed_shape = array_ops.concat(
[shape, array_ops.shape(unique_embeddings)[1:]], 0)
embeds = array_ops.reshape(embeds_flat, embed_shape)
embeds.set_shape(ids.get_shape().concatenate(
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_ops.py b/tensorflow/contrib/layers/python/layers/feature_column_ops.py
index ba77c8911b..62b306c2ea 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_ops.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_ops.py
@@ -181,7 +181,7 @@ def _input_from_feature_columns(columns_to_tensors,
except ValueError as e:
raise ValueError('Error creating input layer for column: {}.\n'
'{}, {}'.format(column.name, e, ee))
- return array_ops.concat_v2(output_tensors, output_rank - 1)
+ return array_ops.concat(output_tensors, output_rank - 1)
def input_from_feature_columns(columns_to_tensors,
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index 43dabc8067..6e41f7f4d6 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -1216,8 +1216,8 @@ def _sparse_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
outer_dimensions = inputs.dense_shape[:new_rank - 1]
inner_dimensions = inputs.dense_shape[new_rank - 1:]
- new_shape = array_ops.concat_v2((outer_dimensions,
- [math_ops.reduce_prod(inner_dimensions)]), 0)
+ new_shape = array_ops.concat((outer_dimensions,
+ [math_ops.reduce_prod(inner_dimensions)]), 0)
flattened = sparse_ops.sparse_reshape(inputs, new_shape)
return flattened
@@ -1229,7 +1229,7 @@ def _dense_inner_flatten(inputs, new_rank):
with ops.control_dependencies([rank_assertion]):
outer_dimensions = array_ops.strided_slice(
array_ops.shape(inputs), [0], [new_rank - 1])
- new_shape = array_ops.concat_v2((outer_dimensions, [-1]), 0)
+ new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
reshaped = array_ops.reshape(inputs, new_shape)
# if `new_rank` is an integer, try to calculate new shape.
@@ -1996,7 +1996,7 @@ def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
- multiples = array_ops.concat_v2(multiples, 0)
+ multiples = array_ops.concat(multiples, 0)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
diff --git a/tensorflow/contrib/layers/python/layers/target_column.py b/tensorflow/contrib/layers/python/layers/target_column.py
index dd8abb5421..3e639a180e 100644
--- a/tensorflow/contrib/layers/python/layers/target_column.py
+++ b/tensorflow/contrib/layers/python/layers/target_column.py
@@ -300,7 +300,7 @@ class _MultiClassTargetColumn(_TargetColumn):
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
- logits = array_ops.concat_v2([array_ops.zeros_like(logits), logits], 1)
+ logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
if proba:
return nn.softmax(logits)
@@ -388,7 +388,7 @@ class _BinarySvmTargetColumn(_MultiClassTargetColumn):
raise ValueError(
"logits to probabilities is not supported for _BinarySvmTargetColumn")
- logits = array_ops.concat_v2([array_ops.zeros_like(logits), logits], 1)
+ logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
return math_ops.argmax(logits, 1)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py b/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
index e59c375c1b..7ecb2cd1e5 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
@@ -238,8 +238,8 @@ def _concatenate_context_input(sequence_input, context_input):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
- array_ops.concat_v2([[1], [padded_length], [1]], 0))
- return array_ops.concat_v2([sequence_input, tiled_context_input], 2)
+ array_ops.concat([[1], [padded_length], [1]], 0))
+ return array_ops.concat([sequence_input, tiled_context_input], 2)
def build_sequence_input(features,
@@ -402,7 +402,7 @@ def _multi_value_predictions(
flattened_activations, proba=True)
flat_predictions = math_ops.argmax(flat_probabilities, 1)
if target_column.num_label_columns == 1:
- probability_shape = array_ops.concat_v2([activations_shape[:2], [2]], 0)
+ probability_shape = array_ops.concat([activations_shape[:2], [2]], 0)
else:
probability_shape = activations_shape
probabilities = array_ops.reshape(
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
index c6b68f7eec..0b4897d4b2 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
@@ -123,7 +123,7 @@ def boston_eval_fn():
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
- return array_ops.concat_v2([features, features], 0), array_ops.concat_v2(
+ return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/head.py b/tensorflow/contrib/learn/python/learn/estimators/head.py
index 810bdfb1b5..dd3afa0efb 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/head.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/head.py
@@ -450,7 +450,7 @@ def _log_loss_with_two_classes(logits, labels):
def _one_class_to_two_class_logits(logits):
- return array_ops.concat_v2((array_ops.zeros_like(logits), logits), 1)
+ return array_ops.concat((array_ops.zeros_like(logits), logits), 1)
class _BinaryLogisticHead(_Head):
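
_one_class_to_two_class_logits above turns a single sigmoid-style logit into two softmax-style class logits by prepending a zero column; softmax of [0, z] is exactly [1 - sigmoid(z), sigmoid(z)]. A two-example sketch:

    import tensorflow as tf

    logits = tf.constant([[2.0], [-1.0]])  # one logit per example
    # Class-0 logit fixed at zero, class-1 logit is the original.
    two_class = tf.concat((tf.zeros_like(logits), logits), 1)  # shape [2, 2]
    probs = tf.nn.softmax(two_class)  # rows: [1 - sigmoid(z), sigmoid(z)]
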
diff --git a/tensorflow/contrib/learn/python/learn/estimators/kmeans.py b/tensorflow/contrib/learn/python/learn/estimators/kmeans.py
index 8e66bb1274..953a26d3f9 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/kmeans.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/kmeans.py
@@ -236,7 +236,7 @@ class KMeansClustering(evaluable.Evaluable, trainable.Trainable):
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
- features = array_ops.concat_v2([features[k] for k in keys], 1)
+ features = array_ops.concat([features[k] for k in keys], 1)
return features
def _get_model_function(self):
diff --git a/tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py b/tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
index 993ce87211..041cc6bf82 100644
--- a/tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
+++ b/tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
@@ -325,7 +325,7 @@ class Seq2SeqTest(test.TestCase):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
- attn_states = array_ops.concat_v2([
+ attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
@@ -347,7 +347,7 @@ class Seq2SeqTest(test.TestCase):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
- attn_states = array_ops.concat_v2([
+ attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
@@ -411,7 +411,7 @@ class Seq2SeqTest(test.TestCase):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
- attn_states = array_ops.concat_v2([
+ attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
@@ -439,9 +439,9 @@ class Seq2SeqTest(test.TestCase):
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
- attn_states = array_ops.concat_v2([
- array_ops.reshape(e, [-1, 1, cell.output_size]) for e in
- enc_outputs
+ attn_states = array_ops.concat([
+ array_ops.reshape(e, [-1, 1, cell.output_size])
+ for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
dec, mem = seq2seq_lib.attention_decoder(
@@ -466,7 +466,7 @@ class Seq2SeqTest(test.TestCase):
cell = core_rnn_cell_impl.GRUCell(2)
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
- attn_states = array_ops.concat_v2([
+ attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [
diff --git a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
index 9ca66fcd5d..bc36e9dced 100644
--- a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
+++ b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
@@ -639,7 +639,7 @@ def attention_decoder(decoder_inputs,
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
- query = array_ops.concat_v2(query_list, 1)
+ query = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope("Attention_%d" % a):
y = linear(query, attention_vec_size, True)
@@ -853,7 +853,7 @@ def embedding_attention_seq2seq(encoder_inputs,
top_states = [
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in encoder_outputs
]
- attention_states = array_ops.concat_v2(top_states, 1)
+ attention_states = array_ops.concat(top_states, 1)
# Decoder.
output_size = None
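
The attention_states construction above (and in the seq2seq tests) lifts a Python list of per-timestep encoder outputs, each [batch, size], into a single [batch, time, size] tensor by giving every step a singleton time axis and concatenating along it. A sketch with toy dimensions:

    import tensorflow as tf

    batch, size = 2, 4
    encoder_outputs = [tf.zeros([batch, size]) for _ in range(3)]  # 3 steps
    top_states = [tf.reshape(e, [-1, 1, size]) for e in encoder_outputs]
    attention_states = tf.concat(top_states, 1)  # shape [2, 3, 4]
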
diff --git a/tensorflow/contrib/linalg/python/kernel_tests/linear_operator_diag_test.py b/tensorflow/contrib/linalg/python/kernel_tests/linear_operator_diag_test.py
index 62f741245b..ab6b91087d 100644
--- a/tensorflow/contrib/linalg/python/kernel_tests/linear_operator_diag_test.py
+++ b/tensorflow/contrib/linalg/python/kernel_tests/linear_operator_diag_test.py
@@ -132,7 +132,7 @@ class LinearOperatorDiagTest(
self.assertAllEqual((2, 1, 3, 3), operator.shape)
# Create a batch matrix with the broadcast shape of operator.
- diag_broadcast = array_ops.concat_v2((diag, diag), 1)
+ diag_broadcast = array_ops.concat((diag, diag), 1)
mat = array_ops.matrix_diag(diag_broadcast)
self.assertAllEqual((2, 2, 3, 3), mat.get_shape()) # being pedantic.
diff --git a/tensorflow/contrib/linalg/python/ops/linear_operator_composition.py b/tensorflow/contrib/linalg/python/ops/linear_operator_composition.py
index a6c9c30d04..3e118ebbd4 100644
--- a/tensorflow/contrib/linalg/python/ops/linear_operator_composition.py
+++ b/tensorflow/contrib/linalg/python/ops/linear_operator_composition.py
@@ -222,7 +222,7 @@ class LinearOperatorComposition(linear_operator.LinearOperator):
zeros += array_ops.zeros(shape=operator.batch_shape_dynamic())
batch_shape = array_ops.shape(zeros)
- return array_ops.concat_v2((batch_shape, matrix_shape), 0)
+ return array_ops.concat((batch_shape, matrix_shape), 0)
def _apply(self, x, adjoint=False):
# If self.operators = [A, B], and not adjoint, then
diff --git a/tensorflow/contrib/linalg/python/ops/linear_operator_diag.py b/tensorflow/contrib/linalg/python/ops/linear_operator_diag.py
index 3595a4a52a..d59e8be767 100644
--- a/tensorflow/contrib/linalg/python/ops/linear_operator_diag.py
+++ b/tensorflow/contrib/linalg/python/ops/linear_operator_diag.py
@@ -169,7 +169,7 @@ class LinearOperatorDiag(linear_operator.LinearOperator):
def _shape_dynamic(self):
d_shape = array_ops.shape(self._diag)
k = d_shape[-1]
- return array_ops.concat_v2((d_shape, [k]), 0)
+ return array_ops.concat((d_shape, [k]), 0)
def _assert_non_singular(self):
return linear_operator_util.assert_no_entries_with_modulus_zero(
diff --git a/tensorflow/contrib/linalg/python/ops/linear_operator_identity.py b/tensorflow/contrib/linalg/python/ops/linear_operator_identity.py
index cd492eee30..8ea3894bda 100644
--- a/tensorflow/contrib/linalg/python/ops/linear_operator_identity.py
+++ b/tensorflow/contrib/linalg/python/ops/linear_operator_identity.py
@@ -227,7 +227,7 @@ class LinearOperatorIdentity(linear_operator.LinearOperator):
if self._batch_shape_arg is None:
return matrix_shape
- return array_ops.concat_v2((self._batch_shape_arg, matrix_shape), 0)
+ return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
@@ -267,8 +267,7 @@ class LinearOperatorIdentity(linear_operator.LinearOperator):
# Dynamic broadcast:
# Always add to an array of zeros, rather than using a "cond", since a
# cond would require copying data from GPU --> CPU.
- special_shape = array_ops.concat_v2(
- (self.batch_shape_dynamic(), [1, 1]), 0)
+ special_shape = array_ops.concat((self.batch_shape_dynamic(), [1, 1]), 0)
zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
return x + zeros
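The linear-operator hunks concatenate shape vectors rather than data: a dynamic batch shape is spliced onto static trailing dimensions, and the cond-free broadcast described in the comment above then adds zeros of that shape. A minimal sketch of the idiom, with batch_shape standing in for batch_shape_dynamic():

    import tensorflow as tf

    x = tf.ones([3, 5, 1])                      # [batch..., n, 1]
    batch_shape = tf.constant([3], tf.int32)    # stand-in for batch_shape_dynamic()

    # Splice the dynamic batch shape onto the static [1, 1] tail ...
    special_shape = tf.concat((batch_shape, [1, 1]), 0)     # -> [3, 1, 1]
    # ... and force broadcasting by adding zeros instead of branching on a cond.
    broadcasted = x + tf.zeros(shape=special_shape, dtype=x.dtype)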
diff --git a/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py b/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py
index 49b9f1feb8..9579b8fabd 100644
--- a/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py
+++ b/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py
@@ -258,7 +258,7 @@ class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
else:
batch_shape = operator.batch_shape_dynamic()
n = operator.domain_dimension_dynamic()
- x_shape = array_ops.concat_v2((batch_shape, [n, r]), 0)
+ x_shape = array_ops.concat((batch_shape, [n, r]), 0)
return random_normal(x_shape, dtype=operator.dtype)
@@ -315,7 +315,7 @@ class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
n = operator.range_dimension_dynamic()
else:
n = operator.domain_dimension_dynamic()
- x_shape = array_ops.concat_v2((batch_shape, [n, r]), 0)
+ x_shape = array_ops.concat((batch_shape, [n, r]), 0)
return random_normal(x_shape, dtype=operator.dtype)
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops.py b/tensorflow/contrib/metrics/python/ops/metric_ops.py
index 613c69637b..c455340759 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops.py
@@ -1502,12 +1502,12 @@ def expand_and_tile(tensor, multiple, dim=0, name=None):
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
- expanded_shape = array_ops.concat_v2(
- (array_ops.strided_slice(tensor.dense_shape, [0], expand_dims),
- [1],
+ expanded_shape = array_ops.concat(
+ (array_ops.strided_slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.strided_slice(
tensor.dense_shape, expand_dims, [-1], end_mask=1 << 0)),
- 0, name='expanded_shape')
+ 0,
+ name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
@@ -1521,7 +1521,7 @@ def expand_and_tile(tensor, multiple, dim=0, name=None):
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
- tile_multiples = array_ops.concat_v2(
+ tile_multiples = array_ops.concat(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
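expand_and_tile assembles its tile multiples the same way: ones in every position except the inserted axis, which carries `multiple`. A small dense-tensor sketch of that construction:

    import tensorflow as tf

    tensor = tf.ones([4, 5])
    multiple, dim = 3, 1

    ones = tf.ones_like(tf.shape(tensor))                   # [1, 1]
    tile_multiples = tf.concat(
        (ones[:dim], (multiple,), ones[dim:]), 0)           # [1, 3, 1]
    tiled = tf.tile(tf.expand_dims(tensor, dim), tile_multiples)  # [4, 3, 5]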
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops_test.py b/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
index a64e93d45c..35efaf14d1 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
@@ -4466,14 +4466,14 @@ class StreamingMeanIOUTest(test.TestCase):
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
- predictions = array_ops.concat_v2(
+ predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
- labels = array_ops.concat_v2(
+ labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
@@ -4513,14 +4513,14 @@ class StreamingMeanIOUTest(test.TestCase):
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
- predictions = array_ops.concat_v2(
+ predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
- labels = array_ops.concat_v2(
+ labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
@@ -4528,7 +4528,7 @@ class StreamingMeanIOUTest(test.TestCase):
],
0)
num_classes = 2
- weights = array_ops.concat_v2(
+ weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
diff --git a/tensorflow/contrib/ndlstm/python/lstm2d.py b/tensorflow/contrib/ndlstm/python/lstm2d.py
index 238f78bcd3..da9698ec5b 100644
--- a/tensorflow/contrib/ndlstm/python/lstm2d.py
+++ b/tensorflow/contrib/ndlstm/python/lstm2d.py
@@ -85,8 +85,8 @@ def horizontal_lstm(images, num_filters_out, scope=None):
with variable_scope.variable_scope("rl"):
hidden_sequence_rl = (lstm1d.ndlstm_base(
sequence, num_filters_out - num_filters_out // 2, reverse=1))
- output_sequence = array_ops.concat_v2(
- [hidden_sequence_lr, hidden_sequence_rl], 2)
+ output_sequence = array_ops.concat([hidden_sequence_lr, hidden_sequence_rl],
+ 2)
output = sequence_to_images(output_sequence, batch_size)
return output
diff --git a/tensorflow/contrib/ndlstm/python/misc.py b/tensorflow/contrib/ndlstm/python/misc.py
index bb5198403b..38eeff84ca 100644
--- a/tensorflow/contrib/ndlstm/python/misc.py
+++ b/tensorflow/contrib/ndlstm/python/misc.py
@@ -91,7 +91,7 @@ def one_hot_mask(labels, num_classes, scope=None):
sparse_labels = math_ops.to_int32(array_ops.reshape(labels, [-1, 1]))
sparse_size, _ = _shape(sparse_labels)
indices = array_ops.reshape(math_ops.range(0, sparse_size, 1), [-1, 1])
- concated = array_ops.concat_v2([indices, sparse_labels], 1)
+ concated = array_ops.concat([indices, sparse_labels], 1)
dense_result = sparse_ops.sparse_to_dense(concated,
[sparse_size, num_classes], 1.0,
0.0)
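one_hot_mask pairs each row index with its label to form [row, class] coordinates, then scatters 1.0 at those coordinates. A toy sketch, assuming the TF-1.x sparse_to_dense API that the surrounding code targets:

    import tensorflow as tf

    labels = tf.constant([2, 0, 1])
    n, num_classes = 3, 3

    rows = tf.reshape(tf.range(0, n, 1), [-1, 1])     # [[0], [1], [2]]
    cols = tf.reshape(labels, [-1, 1])                # [[2], [0], [1]]
    coords = tf.concat([rows, cols], 1)               # [n, 2] coordinates
    one_hot = tf.sparse_to_dense(coords, [n, num_classes], 1.0, 0.0)
    # one_hot == [[0, 0, 1], [1, 0, 0], [0, 1, 0]]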
diff --git a/tensorflow/contrib/opt/python/training/external_optimizer.py b/tensorflow/contrib/opt/python/training/external_optimizer.py
index cc3f9fb3d5..ff80167ff4 100644
--- a/tensorflow/contrib/opt/python/training/external_optimizer.py
+++ b/tensorflow/contrib/opt/python/training/external_optimizer.py
@@ -203,7 +203,7 @@ class ExternalOptimizerInterface(object):
return array_ops.reshape(tensors[0], [-1])
else:
flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
- return array_ops.concat_v2(flattened, 0)
+ return array_ops.concat(flattened, 0)
def _make_eval_func(self, tensors, session, feed_dict, fetches,
callback=None):
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py b/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
index 05a4e826b5..3c84c34726 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
@@ -1354,7 +1354,7 @@ class BidirectionalRNNTest(test.TestCase):
dtype=dtypes.float32,
time_major=use_time_major,
scope=scope)
- outputs = array_ops.concat_v2(outputs, 2)
+ outputs = array_ops.concat(outputs, 2)
state_fw, state_bw = states
outputs_shape = [None, max_length, 2 * num_units]
if use_shape:
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py b/tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
index 194d9522bd..020ac2c522 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
@@ -146,7 +146,7 @@ class FusedRnnCellTest(test.TestCase):
inputs, dtype=dtypes.float64, scope="fw")
bw_outputs, bw_state = fused_bw_cell(
inputs, dtype=dtypes.float64, scope="bw")
- outputs = array_ops.concat_v2([fw_outputs, bw_outputs], 2)
+ outputs = array_ops.concat([fw_outputs, bw_outputs], 2)
fused_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused/")
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py b/tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py
index 5a1b7a4a62..5c56f3a0d9 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py
@@ -557,7 +557,7 @@ class RNNCellTest(test.TestCase):
dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
if state_is_tuple:
- concat_state = array_ops.concat_v2(
+ concat_state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
else:
concat_state = state
@@ -617,14 +617,14 @@ class RNNCellTest(test.TestCase):
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
if not state_is_tuple:
- zero_state = array_ops.concat_v2([
+ zero_state = array_ops.concat([
zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
], 1)
inputs = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 5)
output, state = cell(inputs, zero_state)
if state_is_tuple:
- state = array_ops.concat_v2(
+ state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
sess.run(variables.global_variables_initializer())
self.assertAllClose(sess.run(output), expected_output)
diff --git a/tensorflow/contrib/rnn/python/ops/core_rnn.py b/tensorflow/contrib/rnn/python/ops/core_rnn.py
index 9f139c5e43..d254e717d5 100644
--- a/tensorflow/contrib/rnn/python/ops/core_rnn.py
+++ b/tensorflow/contrib/rnn/python/ops/core_rnn.py
@@ -349,7 +349,7 @@ def static_bidirectional_rnn(cell_fw, cell_bw, inputs,
flat_output_bw = nest.flatten(output_bw)
flat_outputs = tuple(
- array_ops.concat_v2([fw, bw], 1)
+ array_ops.concat([fw, bw], 1)
for fw, bw in zip(flat_output_fw, flat_output_bw))
outputs = nest.pack_sequence_as(structure=output_fw,
diff --git a/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py b/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
index d89881a203..d02168d3ad 100644
--- a/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
+++ b/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
@@ -188,7 +188,7 @@ class BasicLSTMCell(RNNCell):
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
- new_state = array_ops.concat_v2([new_c, new_h], 1)
+ new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
@@ -382,7 +382,7 @@ class LSTMCell(RNNCell):
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
- array_ops.concat_v2([c, m], 1))
+ array_ops.concat([c, m], 1))
return m, new_state
@@ -655,7 +655,7 @@ class MultiRNNCell(RNNCell):
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else
- array_ops.concat_v2(new_states, 1))
+ array_ops.concat(new_states, 1))
return cur_inp, new_states
@@ -748,7 +748,7 @@ def _linear(args, output_size, bias, bias_start=0.0, scope=None):
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
- res = math_ops.matmul(array_ops.concat_v2(args, 1), weights)
+ res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
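_linear concatenates its argument tensors and runs one matmul against a single stacked weight matrix, which is algebraically the same as projecting each argument separately and summing; fusing saves a kernel launch. A quick numeric check of the identity:

    import tensorflow as tf

    x = tf.ones([2, 3])
    h = 2.0 * tf.ones([2, 4])
    w = tf.reshape(tf.range(35, dtype=tf.float32), [7, 5])  # stacked [w_x; w_h]

    fused = tf.matmul(tf.concat([x, h], 1), w)
    split = tf.matmul(x, w[:3]) + tf.matmul(h, w[3:])
    # fused == split up to float rounding.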
diff --git a/tensorflow/contrib/rnn/python/ops/gru_ops.py b/tensorflow/contrib/rnn/python/ops/gru_ops.py
index 3dbb7dc693..8b531dab66 100644
--- a/tensorflow/contrib/rnn/python/ops/gru_ops.py
+++ b/tensorflow/contrib/rnn/python/ops/gru_ops.py
@@ -82,11 +82,11 @@ def _GRUBlockCellGrad(op, *grad):
d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = _gru_ops_so.gru_block_cell_grad(
x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)
- x_h_prev = array_ops.concat_v2([x, h_prev], 1)
+ x_h_prev = array_ops.concat([x, h_prev], 1)
d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)
- x_h_prevr = array_ops.concat_v2([x, h_prev * r], 1)
+ x_h_prevr = array_ops.concat([x, h_prev * r], 1)
d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
d_b_c = nn_ops.bias_add_grad(d_c_bar)
diff --git a/tensorflow/contrib/rnn/python/ops/lstm_ops.py b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
index 9af461ce98..c1578f7da9 100644
--- a/tensorflow/contrib/rnn/python/ops/lstm_ops.py
+++ b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
@@ -277,7 +277,7 @@ def _LSTMBlockCellGrad(op, *grad):
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
- xh = array_ops.concat_v2([x, h_prev], 1)
+ xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
@@ -527,9 +527,9 @@ class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
# correctly, since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
- mod_cell_states = array_ops.concat_v2(
+ mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
- mod_outputs = array_ops.concat_v2(
+ mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
diff --git a/tensorflow/contrib/rnn/python/ops/rnn.py b/tensorflow/contrib/rnn/python/ops/rnn.py
index 3bcedd9f46..3cb027167e 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn.py
@@ -213,7 +213,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
sequence_length=sequence_length,
dtype=dtype)
# Concat the outputs to create the new input.
- prev_layer = array_ops.concat_v2(outputs, 2)
+ prev_layer = array_ops.concat(outputs, 2)
states_fw.append(state_fw)
states_bw.append(state_bw)
diff --git a/tensorflow/contrib/rnn/python/ops/rnn_cell.py b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
index 8e284fda04..aab8fe1805 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn_cell.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
@@ -48,7 +48,7 @@ def _get_concat_variable(name, shape, dtype, num_shards):
if value.name == concat_full_name:
return value
- concat_variable = array_ops.concat_v2(sharded_variable, 0, name=concat_name)
+ concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
@@ -215,7 +215,7 @@ class CoupledInputForgetGateLSTMCell(core_rnn_cell.RNNCell):
dtype=dtype)
# j = new_input, f = forget_gate, o = output_gate
- cell_inputs = array_ops.concat_v2([inputs, m_prev], 1)
+ cell_inputs = array_ops.concat([inputs, m_prev], 1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)
@@ -248,8 +248,8 @@ class CoupledInputForgetGateLSTMCell(core_rnn_cell.RNNCell):
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
- new_state = (core_rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple
- else array_ops.concat_v2([c, m], 1))
+ new_state = (core_rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple else
+ array_ops.concat([c, m], 1))
return m, new_state
@@ -361,8 +361,8 @@ class TimeFreqLSTMCell(core_rnn_cell.RNNCell):
m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
- cell_inputs = array_ops.concat_v2(
- [freq_inputs[fq], m_prev, m_prev_freq], 1)
+ cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq],
+ 1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
@@ -384,11 +384,11 @@ class TimeFreqLSTMCell(core_rnn_cell.RNNCell):
m = sigmoid(o) * tanh(c)
m_prev_freq = m
if fq == 0:
- state_out = array_ops.concat_v2([c, m], 1)
+ state_out = array_ops.concat([c, m], 1)
m_out = m
else:
- state_out = array_ops.concat_v2([state_out, c, m], 1)
- m_out = array_ops.concat_v2([m_out, m], 1)
+ state_out = array_ops.concat([state_out, c, m], 1)
+ m_out = array_ops.concat([m_out, m], 1)
return m_out, state_out
def _make_tf_features(self, input_feat):
@@ -566,8 +566,8 @@ class GridLSTMCell(core_rnn_cell.RNNCell):
if self._state_is_tuple:
state_out = self._state_tuple_type(*state_out_lst)
else:
- state_out = array_ops.concat_v2(state_out_lst, 1)
- m_out = array_ops.concat_v2(m_out_lst, 1)
+ state_out = array_ops.concat(state_out_lst, 1)
+ m_out = array_ops.concat(m_out_lst, 1)
return m_out, state_out
def _compute(self, freq_inputs, block, state, batch_size,
@@ -665,7 +665,7 @@ class GridLSTMCell(core_rnn_cell.RNNCell):
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
- cell_inputs = array_ops.concat_v2(
+ cell_inputs = array_ops.concat(
[freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
# F-LSTM
@@ -1004,7 +1004,7 @@ class BidirectionalGridLSTMCell(GridLSTMCell):
bwd_state_out_lst.extend(bwd_state_out_lst_current)
state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
# Outputs are always concatenated, as they are never used separately.
- m_out = array_ops.concat_v2(fwd_m_out_lst + bwd_m_out_lst, 1)
+ m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)
return m_out, state_out
@@ -1101,19 +1101,19 @@ class AttentionCellWrapper(core_rnn_cell.RNNCell):
inputs = _linear([inputs, attns], input_size, True)
lstm_output, new_state = self._cell(inputs, state)
if self._state_is_tuple:
- new_state_cat = array_ops.concat_v2(nest.flatten(new_state), 1)
+ new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
else:
new_state_cat = new_state
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
with vs.variable_scope("attn_output_projection"):
output = _linear([lstm_output, new_attns], self._attn_size, True)
- new_attn_states = array_ops.concat_v2(
+ new_attn_states = array_ops.concat(
[new_attn_states, array_ops.expand_dims(output, 1)], 1)
new_attn_states = array_ops.reshape(
new_attn_states, [-1, self._attn_length * self._attn_size])
new_state = (new_state, new_attns, new_attn_states)
if not self._state_is_tuple:
- new_state = array_ops.concat_v2(list(new_state), 1)
+ new_state = array_ops.concat(list(new_state), 1)
return output, new_state
def _attention(self, query, attn_states):
@@ -1228,7 +1228,7 @@ class LayerNormBasicLSTMCell(core_rnn_cell.RNNCell):
with vs.variable_scope(scope or "layer_norm_basic_lstm_cell"):
c, h = state
- args = array_ops.concat_v2([inputs, h], 1)
+ args = array_ops.concat([inputs, h], 1)
concat = self._linear(args)
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
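With state_is_tuple disabled, every cell in this file packs its state pieces into one tensor by concatenating along axis 1, and consumers recover them with a matching split. A minimal round trip:

    import tensorflow as tf

    num_units = 4
    c = tf.ones([2, num_units])     # cell state
    m = tf.zeros([2, num_units])    # output / hidden state

    packed = tf.concat([c, m], 1)   # [2, 2 * num_units]
    c2, m2 = tf.split(packed, num_or_size_splits=2, axis=1)
    # c2 == c and m2 == m.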
diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_decoder_fn.py b/tensorflow/contrib/seq2seq/python/ops/attention_decoder_fn.py
index 2d2482e6b1..28c957d26c 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_decoder_fn.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_decoder_fn.py
@@ -124,7 +124,7 @@ def attention_decoder_fn_train(encoder_state,
cell_output = attention
# combine cell_input and attention
- next_input = array_ops.concat_v2([cell_input, attention], 1)
+ next_input = array_ops.concat([cell_input, attention], 1)
return (None, cell_state, next_input, cell_output, context_state)
@@ -297,7 +297,7 @@ def attention_decoder_fn_inference(output_fn,
cell_input = array_ops.gather(embeddings, next_input_id)
# combine cell_input and attention
- next_input = array_ops.concat_v2([cell_input, attention], 1)
+ next_input = array_ops.concat([cell_input, attention], 1)
# if time > maxlen, return all true vector
done = control_flow_ops.cond(
@@ -392,7 +392,7 @@ def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
def construct_fn(attention_query, attention_keys, attention_values):
context = attention_score_fn(attention_query, attention_keys,
attention_values)
- concat_input = array_ops.concat_v2([attention_query, context], 1)
+ concat_input = array_ops.concat([attention_query, context], 1)
attention = layers.linear(
concat_input, num_units, biases_initializer=None, scope=scope)
return attention
diff --git a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
index 9636ef1e29..c764887f6d 100644
--- a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
+++ b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
@@ -137,7 +137,7 @@ class BoundingBox(ItemHandler):
side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
sides.append(side)
- bounding_box = array_ops.concat_v2(sides, 0)
+ bounding_box = array_ops.concat(sides, 0)
return array_ops.transpose(bounding_box)
@@ -257,7 +257,7 @@ class SparseTensor(ItemHandler):
ids = math_ops.to_int64(indices.values)
indices_columns_to_preserve = array_ops.slice(
indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
- new_indices = array_ops.concat_v2(
+ new_indices = array_ops.concat(
[indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)
tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
diff --git a/tensorflow/contrib/slim/python/slim/nets/inception_v1.py b/tensorflow/contrib/slim/python/slim/nets/inception_v1.py
index 81abcc26b2..ef9a4f6f68 100644
--- a/tensorflow/contrib/slim/python/slim/nets/inception_v1.py
+++ b/tensorflow/contrib/slim/python/slim/nets/inception_v1.py
@@ -106,7 +106,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -128,7 +128,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -156,7 +156,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -178,7 +178,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -200,7 +200,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -222,7 +222,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -244,7 +244,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -272,7 +272,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
@@ -294,7 +294,7 @@ def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
diff --git a/tensorflow/contrib/slim/python/slim/nets/inception_v2.py b/tensorflow/contrib/slim/python/slim/nets/inception_v2.py
index ca14ce7197..46062600e6 100644
--- a/tensorflow/contrib/slim/python/slim/nets/inception_v2.py
+++ b/tensorflow/contrib/slim/python/slim/nets/inception_v2.py
@@ -170,7 +170,7 @@ def inception_v2_base(inputs,
depth(32), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -205,7 +205,7 @@ def inception_v2_base(inputs,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -233,7 +233,7 @@ def inception_v2_base(inputs,
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -268,7 +268,7 @@ def inception_v2_base(inputs,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -303,7 +303,7 @@ def inception_v2_base(inputs,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -338,7 +338,7 @@ def inception_v2_base(inputs,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -374,7 +374,7 @@ def inception_v2_base(inputs,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -402,7 +402,7 @@ def inception_v2_base(inputs,
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -437,7 +437,7 @@ def inception_v2_base(inputs,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -473,7 +473,7 @@ def inception_v2_base(inputs,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
diff --git a/tensorflow/contrib/slim/python/slim/nets/inception_v3.py b/tensorflow/contrib/slim/python/slim/nets/inception_v3.py
index a89dc5dd87..cbd94cb533 100644
--- a/tensorflow/contrib/slim/python/slim/nets/inception_v3.py
+++ b/tensorflow/contrib/slim/python/slim/nets/inception_v3.py
@@ -151,7 +151,7 @@ def inception_v3_base(inputs,
return net, end_points
# 35 x 35 x 192.
- # Inception blocks
+ # Inception blocks
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
@@ -178,7 +178,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -205,7 +205,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -232,7 +232,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -261,7 +261,7 @@ def inception_v3_base(inputs,
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -294,7 +294,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -327,7 +327,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -359,7 +359,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -392,7 +392,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -425,7 +425,7 @@ def inception_v3_base(inputs,
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -438,7 +438,7 @@ def inception_v3_base(inputs,
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
- branch_1 = array_ops.concat_v2(
+ branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
@@ -451,7 +451,7 @@ def inception_v3_base(inputs,
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
- branch_2 = array_ops.concat_v2(
+ branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
@@ -463,7 +463,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
@@ -477,7 +477,7 @@ def inception_v3_base(inputs,
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
- branch_1 = array_ops.concat_v2(
+ branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
@@ -490,7 +490,7 @@ def inception_v3_base(inputs,
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
- branch_2 = array_ops.concat_v2(
+ branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
@@ -502,7 +502,7 @@ def inception_v3_base(inputs,
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
- net = array_ops.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
+ net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
diff --git a/tensorflow/contrib/solvers/python/ops/lanczos.py b/tensorflow/contrib/solvers/python/ops/lanczos.py
index 565639ff12..8631002a53 100644
--- a/tensorflow/contrib/solvers/python/ops/lanczos.py
+++ b/tensorflow/contrib/solvers/python/ops/lanczos.py
@@ -233,5 +233,5 @@ def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
zero_column = array_ops.expand_dims(
array_ops.zeros(
shape[:1], dtype=matrix.dtype), 1)
- return matrix * alpha + array_ops.concat_v2(
+ return matrix * alpha + array_ops.concat(
[zero_column, matrix[:, :-1] * beta], 1)
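bidiag_matmul expresses multiplication by the superdiagonal as a column shift: prepend a zero column, drop the last column, scale by beta, and add the alpha-scaled diagonal term. The shift in isolation:

    import tensorflow as tf

    m = tf.constant([[1., 2.], [3., 4.]])
    zero_column = tf.zeros([2, 1])

    shifted = tf.concat([zero_column, m[:, :-1]], 1)
    # shifted == [[0., 1.], [0., 3.]]: each column moved one place right.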
diff --git a/tensorflow/contrib/specs/python/specs_ops.py b/tensorflow/contrib/specs/python/specs_ops.py
index 3cbd87ff5e..a6bd4d16c2 100644
--- a/tensorflow/contrib/specs/python/specs_ops.py
+++ b/tensorflow/contrib/specs/python/specs_ops.py
@@ -62,7 +62,7 @@ class Conc(specs_lib.Composable):
def funcall(self, x):
outputs = [f.funcall(x) for f in self.funs]
- return array_ops.concat_v2(outputs, self.dim)
+ return array_ops.concat(outputs, self.dim)
External = specs_lib.External
diff --git a/tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py b/tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
index fb19e92b08..cbb20fc0b8 100644
--- a/tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
+++ b/tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
@@ -76,7 +76,7 @@ class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
nn_activations[-1],
self.params.layer_size))
- nn_activations_tensor = array_ops.concat_v2(
+ nn_activations_tensor = array_ops.concat(
nn_activations, 1, name="flattened_nn_activations")
return nn_activations_tensor
diff --git a/tensorflow/contrib/tensor_forest/python/ops/data_ops.py b/tensorflow/contrib/tensor_forest/python/ops/data_ops.py
index aa5034ce10..398db3acd0 100644
--- a/tensorflow/contrib/tensor_forest/python/ops/data_ops.py
+++ b/tensorflow/contrib/tensor_forest/python/ops/data_ops.py
@@ -146,7 +146,7 @@ def ParseDataTensorOrDict(data):
processed_dense_features = None
processed_sparse_features = None
if dense_features:
- processed_dense_features = array_ops.concat_v2(dense_features, 1)
+ processed_dense_features = array_ops.concat(dense_features, 1)
data_spec.dense_features_size = dense_features_size
if sparse_features:
processed_sparse_features = sparse_ops.sparse_concat(1, sparse_features)
@@ -186,7 +186,7 @@ def ParseLabelTensorOrDict(labels):
"""
if isinstance(labels, dict):
return math_ops.to_float(
- array_ops.concat_v2(
+ array_ops.concat(
[
sparse_ops.sparse_tensor_to_dense(
labels[k], default_value=-1) if isinstance(
diff --git a/tensorflow/contrib/tensor_forest/python/tensor_forest.py b/tensorflow/contrib/tensor_forest/python/tensor_forest.py
index 39fd703bb7..c0a0b03c29 100644
--- a/tensorflow/contrib/tensor_forest/python/tensor_forest.py
+++ b/tensorflow/contrib/tensor_forest/python/tensor_forest.py
@@ -343,7 +343,7 @@ class RandomForestGraphs(object):
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(
value=input_data, num_or_size_splits=self.params.num_features, axis=1)
- return array_ops.concat_v2(
+ return array_ops.concat(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def training_graph(self,
@@ -858,7 +858,7 @@ class RandomTreeGraphs(object):
state_ops.scatter_update(self.variables.accumulator_to_node_map,
a2n_map_updates[0], a2n_map_updates[1]))
- cleared_and_allocated_accumulators = array_ops.concat_v2(
+ cleared_and_allocated_accumulators = array_ops.concat(
[accumulators_cleared, accumulators_allocated], 0)
# Calculate values to put into scatter update for candidate counts.
@@ -889,7 +889,7 @@ class RandomTreeGraphs(object):
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
- accumulator_updates = array_ops.concat_v2([total_cleared, total_reset], 0)
+ accumulator_updates = array_ops.concat([total_cleared, total_reset], 0)
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
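_bag_features samples columns by splitting the input into per-feature slices and concatenating the slices named in the tree's bag. A toy sketch with a hypothetical bag:

    import tensorflow as tf

    data = tf.constant([[0., 1., 2., 3.]])     # one row, four features
    bagged = [2, 0, 3]                         # hypothetical bag for one tree

    cols = tf.split(value=data, num_or_size_splits=4, axis=1)
    sample = tf.concat([cols[i] for i in bagged], 1)
    # sample == [[2., 0., 3.]]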
diff --git a/tensorflow/contrib/tensor_forest/python/topn.py b/tensorflow/contrib/tensor_forest/python/topn.py
index ff9199cfd0..342b8144e5 100644
--- a/tensorflow/contrib/tensor_forest/python/topn.py
+++ b/tensorflow/contrib/tensor_forest/python/topn.py
@@ -122,8 +122,8 @@ class TopN(object):
self.sl_ids, ids)
u1 = state_ops.scatter_update(
self.sl_ids,
- array_ops.concat_v2([[0], shortlist_ids_to_remove], 0),
- array_ops.concat_v2(
+ array_ops.concat([[0], shortlist_ids_to_remove], 0),
+ array_ops.concat(
[new_length, array_ops.ones_like(shortlist_ids_to_remove) * -1],
0))
u2 = state_ops.scatter_update(
@@ -143,9 +143,9 @@ class TopN(object):
new_length = math_ops.reduce_sum(
math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
u1 = self.sl_ids.assign(
- math_ops.to_int64(array_ops.concat_v2([[new_length], new_ids], 0)))
+ math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
u2 = self.sl_scores.assign(
- array_ops.concat_v2([[smallest_new_score], new_scores], 0))
+ array_ops.concat([[smallest_new_score], new_scores], 0))
self.last_ops = [u1, u2]
return control_flow_ops.group(u1, u2)
diff --git a/tensorflow/contrib/training/python/training/resample.py b/tensorflow/contrib/training/python/training/resample.py
index 85c7584f7f..9f6411d57a 100644
--- a/tensorflow/contrib/training/python/training/resample.py
+++ b/tensorflow/contrib/training/python/training/resample.py
@@ -116,7 +116,7 @@ def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
# concatenating zero-size TensorArrays" limitation:
def _empty_tensor_like(t):
result = array_ops.zeros(
- shape=(array_ops.concat_v2([[0], array_ops.shape(t)[1:]], 0)),
+ shape=(array_ops.concat([[0], array_ops.shape(t)[1:]], 0)),
dtype=t.dtype)
if t.get_shape().ndims is not None:
# preserve known shapes
diff --git a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
index 0fdbaf8594..a4f753acca 100644
--- a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
+++ b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
@@ -1079,7 +1079,7 @@ class SequenceQueueingStateSaver(object):
sequence_count, width=5, fill="0"), ":", self._key
],
name="StringJoinCurrentKeys")
- next_keys = array_ops.concat_v2(
+ next_keys = array_ops.concat(
[
array_ops.slice(current_keys, [1], [-1]), array_ops.expand_dims(
string_ops.string_join(
@@ -1094,7 +1094,7 @@ class SequenceQueueingStateSaver(object):
# Reshape sequences to sequence_count rows
array_ops.reshape(
v,
- array_ops.concat_v2(
+ array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
array_ops.expand_dims(self._num_unroll, 0),
@@ -1114,7 +1114,7 @@ class SequenceQueueingStateSaver(object):
# Copy context to be sequence_count rows
array_ops.tile(
array_ops.expand_dims(v, 0),
- array_ops.concat_v2(
+ array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
[1] * v.get_shape().ndims
@@ -1515,12 +1515,12 @@ def _padding(sequences, num_unroll):
# the shape of the paddings that we concat with the original value will be
# [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
# tf.shape(value)[tf.rank(value) - 1])]
- padding_shape = array_ops.concat_v2((num_paddings,
- array_ops.shape(value)[1:]), 0)
+ padding_shape = array_ops.concat((num_paddings, array_ops.shape(value)[1:]),
+ 0)
# 2. fill padding shape with dummies
dummy = array_ops.constant(
"" if value.dtype == dtypes.string else 0, dtype=value.dtype)
paddings = array_ops.fill(dims=padding_shape, value=dummy)
# 3. concat values with paddings
- padded_sequences[key] = array_ops.concat_v2([value, paddings], 0)
+ padded_sequences[key] = array_ops.concat([value, paddings], 0)
return length, padded_sequences
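_padding derives the filler block's shape by splicing the number of padding rows onto the value's trailing dynamic dimensions, fills it with a dummy, and concatenates it under the original rows. A toy sketch:

    import tensorflow as tf

    value = tf.ones([3, 2])
    num_paddings = tf.constant([2])            # rows of padding to add

    padding_shape = tf.concat((num_paddings, tf.shape(value)[1:]), 0)  # [2, 2]
    paddings = tf.fill(dims=padding_shape, value=0.0)
    padded = tf.concat([value, paddings], 0)   # [5, 2]: ones then zeros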
diff --git a/tensorflow/examples/udacity/6_lstm.ipynb b/tensorflow/examples/udacity/6_lstm.ipynb
index 7e78c5328f..b17e70be95 100644
--- a/tensorflow/examples/udacity/6_lstm.ipynb
+++ b/tensorflow/examples/udacity/6_lstm.ipynb
@@ -573,10 +573,10 @@
" with tf.control_dependencies([saved_output.assign(output),\n",
" saved_state.assign(state)]):\n",
" # Classifier.\n",
- " logits = tf.nn.xw_plus_b(tf.concat_v2(outputs, 0), w, b)\n",
+ " logits = tf.nn.xw_plus_b(tf.concat(outputs, 0), w, b)\n",
" loss = tf.reduce_mean(\n",
" tf.nn.softmax_cross_entropy_with_logits(\n",
- " labels=tf.concat_v2(train_labels, 0), logits=logits))\n",
+ " labels=tf.concat(train_labels, 0), logits=logits))\n",
"\n",
" # Optimizer.\n",
" global_step = tf.Variable(0)\n",
diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 8105be52bf..3407a0fdbc 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -713,7 +713,7 @@ class UnrollLSTMTest(test.TestCase):
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
- xm = array_ops.concat_v2([x, mprev], 1)
+ xm = array_ops.concat([x, mprev], 1)
i_i, i_g, f_g, o_g = array_ops.split(
value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
diff --git a/tensorflow/python/framework/tensor_util_test.py b/tensorflow/python/framework/tensor_util_test.py
index 9b68067708..16aab707e7 100644
--- a/tensorflow/python/framework/tensor_util_test.py
+++ b/tensorflow/python/framework/tensor_util_test.py
@@ -642,18 +642,18 @@ class ConstantValueTest(test.TestCase):
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
- tf_val = array_ops.concat_v2(
+ tf_val = array_ops.concat(
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
- tf_val = array_ops.concat_v2(
+ tf_val = array_ops.concat(
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
array_ops.placeholder(dtypes.int32))
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
- tf_val = array_ops.concat_v2([
+ tf_val = array_ops.concat([
np_val[0, :, :], array_ops.placeholder(dtypes.float32), np_val[2, :, :]
], 1)
c_val = tensor_util.constant_value(tf_val)
@@ -698,13 +698,13 @@ class ConstantValueAsShapeTest(test.TestCase):
self.assertEqual([16, 37, None], c_val.as_list())
def testConcat(self):
- tf_val = array_ops.concat_v2(
+ tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(2,))], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, None], c_val.as_list())
- tf_val = array_ops.concat_v2(
+ tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
diff --git a/tensorflow/python/kernel_tests/confusion_matrix_test.py b/tensorflow/python/kernel_tests/confusion_matrix_test.py
index c7e6d82d9b..15fa23efc9 100644
--- a/tensorflow/python/kernel_tests/confusion_matrix_test.py
+++ b/tensorflow/python/kernel_tests/confusion_matrix_test.py
@@ -71,10 +71,10 @@ class ConfusionMatrixTest(test.TestCase):
pos = random_ops.random_normal(
[20], mean=m_pos, stddev=s, dtype=dtypes.float32)
- data = array_ops.concat_v2([neg, pos], 0)
+ data = array_ops.concat([neg, pos], 0)
data = math_ops.cast(math_ops.round(data), tf_dtype)
data = math_ops.minimum(math_ops.maximum(data, 0), 1)
- lab = array_ops.concat_v2(
+ lab = array_ops.concat(
[
array_ops.zeros(
[20], dtype=tf_dtype), array_ops.ones(
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
index 7eed59d177..bd35c2edaa 100644
--- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -734,7 +734,7 @@ class ControlFlowTest(test.TestCase):
c = array_ops.strided_slice(x,
array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
- o = array_ops.concat_v2([o, c], 0)
+ o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
@@ -821,7 +821,7 @@ class ControlFlowTest(test.TestCase):
def b(i, j):
new_i = math_ops.add(i, 1)
- new_j = array_ops.concat_v2([j, j], 0)
+ new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
@@ -1847,7 +1847,7 @@ class ControlFlowTest(test.TestCase):
return i < 2
def body(i, h):
- return i + 1, array_ops.concat_v2([h, x], 0)
+ return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
diff --git a/tensorflow/python/kernel_tests/embedding_ops_test.py b/tensorflow/python/kernel_tests/embedding_ops_test.py
index 701a97adbb..a30f4abdf4 100644
--- a/tensorflow/python/kernel_tests/embedding_ops_test.py
+++ b/tensorflow/python/kernel_tests/embedding_ops_test.py
@@ -169,7 +169,7 @@ def _EmbeddingParamsAsPartitionedVariable(num_shards,
partitioned_variable = variable_scope.get_variable(
"p",
shape=[vocab_size] + shape,
- initializer=array_ops.concat_v2([params[p_i.name] for p_i in p], 0),
+ initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=num_shards, min_slice_size=1))
return p, partitioned_variable, params, feed_dict
diff --git a/tensorflow/python/kernel_tests/large_concat_op_test.py b/tensorflow/python/kernel_tests/large_concat_op_test.py
index 6619a2d94b..66afb6ec01 100644
--- a/tensorflow/python/kernel_tests/large_concat_op_test.py
+++ b/tensorflow/python/kernel_tests/large_concat_op_test.py
@@ -31,7 +31,7 @@ class LargeConcatOpTest(test.TestCase):
with ops.device("/cpu:0"):
a = array_ops.ones([2**31 + 6], dtype=dtypes.int8)
b = array_ops.zeros([1024], dtype=dtypes.int8)
- onezeros = array_ops.concat_v2([a, b], 0)
+ onezeros = array_ops.concat([a, b], 0)
with self.test_session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
diff --git a/tensorflow/python/kernel_tests/metrics_test.py b/tensorflow/python/kernel_tests/metrics_test.py
index 1b7cdc590d..0ea14e82bc 100644
--- a/tensorflow/python/kernel_tests/metrics_test.py
+++ b/tensorflow/python/kernel_tests/metrics_test.py
@@ -3277,14 +3277,14 @@ class MeanIOUTest(test.TestCase):
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
- predictions = array_ops.concat_v2(
+ predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
- labels = array_ops.concat_v2(
+ labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
@@ -3321,14 +3321,14 @@ class MeanIOUTest(test.TestCase):
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
- predictions = array_ops.concat_v2(
+ predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
- labels = array_ops.concat_v2(
+ labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
@@ -3336,7 +3336,7 @@ class MeanIOUTest(test.TestCase):
],
0)
num_classes = 2
- weights = array_ops.concat_v2(
+ weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
diff --git a/tensorflow/python/kernel_tests/partitioned_variables_test.py b/tensorflow/python/kernel_tests/partitioned_variables_test.py
index 0d5a9339bc..cc8622a616 100644
--- a/tensorflow/python/kernel_tests/partitioned_variables_test.py
+++ b/tensorflow/python/kernel_tests/partitioned_variables_test.py
@@ -302,7 +302,7 @@ class PartitionedVariablesTestCase(test.TestCase):
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
variables.global_variables_initializer().run()
- val = array_ops.concat_v2(vs, 0).eval()
+ val = array_ops.concat(vs, 0).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
@@ -314,7 +314,7 @@ class PartitionedVariablesTestCase(test.TestCase):
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
- val = array_ops.concat_v2(vs, 1).eval()
+ val = array_ops.concat(vs, 1).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
@@ -380,7 +380,7 @@ class PartitionedVariablesTestCase(test.TestCase):
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
variables.global_variables_initializer().run()
- val = array_ops.concat_v2(vs, 1).eval()
+ val = array_ops.concat(vs, 1).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
@@ -423,7 +423,7 @@ class PartitionedVariablesTestCase(test.TestCase):
]
]
for i, vs in enumerate(var_lists):
- var_val = array_ops.concat_v2(vs, 1).eval()
+ var_val = array_ops.concat(vs, 1).eval()
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
@@ -436,7 +436,7 @@ class PartitionedVariablesTestCase(test.TestCase):
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
- val = array_ops.concat_v2(vs, 0).eval()
+ val = array_ops.concat(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
@@ -447,7 +447,7 @@ class PartitionedVariablesTestCase(test.TestCase):
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
- val = array_ops.concat_v2(vs, 0).eval()
+ val = array_ops.concat(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
@@ -467,7 +467,7 @@ class PartitionedVariablesTestCase(test.TestCase):
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
- val = array_ops.concat_v2(vs, 0).eval()
+ val = array_ops.concat(vs, 0).eval()
self.assertAllClose(slice0 + slice1 + slice2, val)
self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
diff --git a/tensorflow/python/kernel_tests/reshape_op_test.py b/tensorflow/python/kernel_tests/reshape_op_test.py
index ad852d76c2..67aeb67d8d 100644
--- a/tensorflow/python/kernel_tests/reshape_op_test.py
+++ b/tensorflow/python/kernel_tests/reshape_op_test.py
@@ -129,10 +129,10 @@ class ReshapeTest(test.TestCase):
y = array_ops.reshape(x, [array_ops.placeholder(dtypes.int32), 37])
self.assertEqual([None, 37], y.get_shape().as_list())
- # Unknown input shape, partial new shape using `tf.concat_v2()`.
+ # Unknown input shape, partial new shape using `tf.concat()`.
y = array_ops.reshape(
x,
- array_ops.concat_v2(
+ array_ops.concat(
[array_ops.placeholder(
dtypes.int32, shape=(2,)), [37, 42]], 0))
self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
diff --git a/tensorflow/python/kernel_tests/svd_op_test.py b/tensorflow/python/kernel_tests/svd_op_test.py
index 3f6b6958fc..fd49e1a6cc 100644
--- a/tensorflow/python/kernel_tests/svd_op_test.py
+++ b/tensorflow/python/kernel_tests/svd_op_test.py
@@ -87,10 +87,10 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_):
if full_matrices:
if m > n:
zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
- diag_s = array_ops.concat_v2([diag_s, zeros], a.ndim - 2)
+ diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)
elif n > m:
zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
- diag_s = array_ops.concat_v2([diag_s, zeros], a.ndim - 1)
+ diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)
a_recon = math_ops.matmul(u, diag_s)
a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
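What the padding above accomplishes, as a standalone sketch (TensorFlow 1.x assumed): with full_matrices=True and m > n, diag(s) must be padded with an (m - n) x n zero block before u @ diag(s) @ v^T reproduces the input.

    import numpy as np
    import tensorflow as tf

    a = np.random.rand(5, 3).astype(np.float32)  # m=5 > n=3
    s, u, v = tf.svd(a, full_matrices=True)
    diag_s = tf.matrix_diag(s)                    # [3, 3]
    zeros = tf.zeros((5 - 3, 3), dtype=tf.float32)
    diag_s = tf.concat([diag_s, zeros], 0)        # pad to [5, 3]
    a_recon = tf.matmul(tf.matmul(u, diag_s), v, adjoint_b=True)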
diff --git a/tensorflow/python/ops/array_grad.py b/tensorflow/python/ops/array_grad.py
index f8ddb3e0aa..50aab4fad5 100644
--- a/tensorflow/python/ops/array_grad.py
+++ b/tensorflow/python/ops/array_grad.py
@@ -69,13 +69,10 @@ def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
# Make a vector of length equal to the input's dimensions,
# with 0's everywhere and 1 in the concat dim position.
# Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
- mask = array_ops.concat_v2(
- [array_ops.fill(
- array_ops.expand_dims(concat_dim, 0), 0),
- [1],
- array_ops.fill(
- shape_of_shape - concat_dim - 1, 0)],
- 0)
+ mask = array_ops.concat([
+ array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
+ array_ops.fill(shape_of_shape - concat_dim - 1, 0)
+ ], 0)
begin = array_ops.fill(shape_of_shape, 0)
return mask, begin
@@ -151,10 +148,8 @@ def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)
for size in sizes:
new_values = array_ops.slice(
- grad.values,
- begin,
- array_ops.concat_v2(
- [[-1], array_ops.slice(size, [1], [-1])], 0))
+ grad.values, begin,
+ array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))
out_grads.append(
ops.IndexedSlices(new_values, grad.indices, size))
# Lint complains begin = begin + ...
@@ -223,7 +218,7 @@ def _SliceGrad(op, grad):
before_pad = array_ops.reshape(begin_vec, shape)
after_pad = array_ops.reshape(
array_ops.shape(input_vec) - slice_size - begin_vec, shape)
- paddings = array_ops.concat_v2([before_pad, after_pad], 1)
+ paddings = array_ops.concat([before_pad, after_pad], 1)
return array_ops.pad(grad, paddings), None, None
@@ -269,12 +264,12 @@ def _StridedSliceGradGrad(op, grad):
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
- return None, array_ops.concat_v2(list(grads), op.inputs[0])
+ return None, array_ops.concat(list(grads), op.inputs[0])
@ops.RegisterGradient("SplitV")
def _SplitVGrad(op, *grads):
- returnval = array_ops.concat_v2(list(grads), op.inputs[2])
+ returnval = array_ops.concat(list(grads), op.inputs[2])
returnval = [returnval] + [None,] * (len(op.inputs) - 1)
return returnval
@@ -321,7 +316,7 @@ def _MatrixSetDiagGrad(op, grad):
batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
min_dim = math_ops.reduce_min(matrix_shape)
- diag_shape = array_ops.concat_v2([batch_shape, [min_dim]], 0)
+ diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
grad_input = array_ops.matrix_set_diag(
grad, array_ops.zeros(
diag_shape, dtype=grad.dtype))
@@ -359,7 +354,7 @@ def _GatherGrad(op, grad):
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
size = array_ops.expand_dims(array_ops.size(indices), 0)
- values_shape = array_ops.concat_v2([size, params_shape[1:]], 0)
+ values_shape = array_ops.concat([size, params_shape[1:]], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, size)
return [ops.IndexedSlices(values, indices, params_shape), None]
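A standalone illustration of the mask built in _CreateDenseMaskAndBegin above, assuming TensorFlow 1.x; for a rank-3 input and concat_dim=1 it evaluates to [0, 1, 0], i.e. zeros everywhere except a 1 in the concat dimension.

    import tensorflow as tf

    concat_dim = tf.constant(1)
    shape_of_shape = tf.constant([3])  # [rank of the input]
    mask = tf.concat([
        tf.fill(tf.expand_dims(concat_dim, 0), 0), [1],
        tf.fill(shape_of_shape - concat_dim - 1, 0)
    ], 0)
    with tf.Session() as sess:
        print(sess.run(mask))  # [0 1 0]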
diff --git a/tensorflow/python/ops/concat_benchmark.py b/tensorflow/python/ops/concat_benchmark.py
index bed295c312..094f8bb2dc 100644
--- a/tensorflow/python/ops/concat_benchmark.py
+++ b/tensorflow/python/ops/concat_benchmark.py
@@ -69,7 +69,7 @@ def build_graph(device, input_shape, variable, num_inputs, axis, grad):
]) for _ in range(num_inputs)
]
- outputs = [array_ops.concat_v2(inputs, axis) for _ in range(100)]
+ outputs = [array_ops.concat(inputs, axis) for _ in range(100)]
if grad:
return control_flow_ops.group(*list(
itertools.chain.from_iterable([
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index eeda6940a6..e3871431f3 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -2284,7 +2284,7 @@ class WhileContext(ControlFlowContext):
if self.outer_context: self.outer_context.Exit()
else:
values_shape = array_ops.shape_internal(op.inputs[0], optimize=False)[1:]
- values_shape = array_ops.concat_v2([[1], values_shape], 0)
+ values_shape = array_ops.concat([[1], values_shape], 0)
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
indices_acc = constant_op.constant([0], indices.dtype)
shape_acc = None
@@ -2316,7 +2316,7 @@ class WhileContext(ControlFlowContext):
# The actual accumulation.
acc_indexed_slices = [
- array_ops.concat_v2([xa[1], xv], 0)
+ array_ops.concat([xa[1], xv], 0)
for xa, xv in zip(switch_acc[:2], [indices, values])
]
if shape_acc is not None:
@@ -2601,7 +2601,7 @@ def while_loop(cond, body, loop_vars, shape_invariants=None,
i0 = tf.constant(0)
m0 = tf.ones([2, 2])
c = lambda i, m: i < 10
- b = lambda i, m: [i+1, tf.concat_v2([m, m], axis=0)]
+ b = lambda i, m: [i+1, tf.concat([m, m], axis=0)]
tf.while_loop(
c, b, loop_vars=[i0, m0],
shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])
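The docstring snippet above, completed into a runnable form (TensorFlow 1.x assumed); the second loop variable doubles its first dimension each iteration, so its shape invariant must leave that dimension unknown.

    import tensorflow as tf

    i0 = tf.constant(0)
    m0 = tf.ones([2, 2])
    c = lambda i, m: i < 10
    b = lambda i, m: [i + 1, tf.concat([m, m], axis=0)]
    _, m_final = tf.while_loop(
        c, b, loop_vars=[i0, m0],
        shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])
    with tf.Session() as sess:
        print(sess.run(m_final).shape)  # (2048, 2): 2 rows doubled 10 times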
diff --git a/tensorflow/python/ops/embedding_ops.py b/tensorflow/python/ops/embedding_ops.py
index 95bba3efc5..80507024d2 100644
--- a/tensorflow/python/ops/embedding_ops.py
+++ b/tensorflow/python/ops/embedding_ops.py
@@ -181,7 +181,7 @@ def embedding_lookup(params, ids, partition_strategy="mod", name=None,
element_shape = element_shape.merge_with(p.get_shape()[1:])
if element_shape.is_fully_defined():
ret = array_ops.reshape(ret,
- array_ops.concat_v2(
+ array_ops.concat(
[array_ops.shape(ids), element_shape], 0))
else:
# It's important that we compute params[0].shape on the right device
@@ -189,7 +189,7 @@ def embedding_lookup(params, ids, partition_strategy="mod", name=None,
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
ret = array_ops.reshape(ret,
- array_ops.concat_v2([
+ array_ops.concat([
array_ops.shape(ids),
array_ops.slice(params_shape, [1], [-1])
], 0))
@@ -321,8 +321,8 @@ def embedding_lookup_sparse(params, sp_ids, sp_weights,
# Reshape weights to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
- bcast_weights_shape = array_ops.concat_v2(
- [array_ops.shape(weights), ones], 0)
+ bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones],
+ 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
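The reshape in embedding_lookup restores the leading ids dimensions in front of the embedding dimension; a minimal sketch of the same mechanism (TensorFlow 1.x assumed):

    import tensorflow as tf

    params = tf.constant([[0.0, 0.1], [1.0, 1.1], [2.0, 2.1]])
    ids = tf.constant([[0, 2], [1, 1]])
    flat = tf.gather(params, tf.reshape(ids, [-1]))  # [4, 2]
    # New shape = shape(ids) + trailing embedding dims = [2, 2, 2].
    ret = tf.reshape(flat, tf.concat([tf.shape(ids), tf.shape(params)[1:]], 0))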
diff --git a/tensorflow/python/ops/gradients_impl.py b/tensorflow/python/ops/gradients_impl.py
index 7017640b7a..20474d483d 100644
--- a/tensorflow/python/ops/gradients_impl.py
+++ b/tensorflow/python/ops/gradients_impl.py
@@ -770,8 +770,8 @@ def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
- array_ops.concat_v2([x.values for x in out_grad], 0),
- array_ops.concat_v2([x.indices for x in out_grad], 0),
+ array_ops.concat([x.values for x in out_grad], 0),
+ array_ops.concat([x.indices for x in out_grad], 0),
out_grad[0].dense_shape)
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
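A standalone sketch of the aggregation path above (TensorFlow 1.x assumed): two sparse gradients for the same variable are combined by concatenating their values and indices, deferring densification.

    import tensorflow as tf

    g1 = tf.IndexedSlices(values=tf.constant([[1.0, 2.0]]),
                          indices=tf.constant([0]),
                          dense_shape=tf.constant([3, 2]))
    g2 = tf.IndexedSlices(values=tf.constant([[3.0, 4.0]]),
                          indices=tf.constant([2]),
                          dense_shape=tf.constant([3, 2]))
    # Duplicate indices are permitted; downstream ops sum them on use.
    agg = tf.IndexedSlices(
        tf.concat([g1.values, g2.values], 0),
        tf.concat([g1.indices, g2.indices], 0),
        g1.dense_shape)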
diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py
index 3c83e488dc..c33a002086 100644
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
@@ -112,9 +112,9 @@ class GradientsTest(test_util.TensorFlowTestCase):
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = constant([1.0])
- t5 = array_ops.concat_v2([t4, t3], 0)
+ t5 = array_ops.concat([t4, t3], 0)
t6 = constant([2.0])
- t7 = array_ops.concat_v2([t5, t6], 0)
+ t7 = array_ops.concat([t5, t6], 0)
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
@@ -123,10 +123,10 @@ class GradientsTest(test_util.TensorFlowTestCase):
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
- t4 = array_ops.concat_v2([t3, t3, t3], 0)
+ t4 = array_ops.concat([t3, t3, t3], 0)
t5 = constant([1.0])
- t6 = array_ops.concat_v2([t4, t5], 0)
- t7 = array_ops.concat_v2([t6, t3], 0)
+ t6 = array_ops.concat([t4, t5], 0)
+ t7 = array_ops.concat([t6, t3], 0)
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index 39b7c8892d..2ca9be6187 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -1041,7 +1041,7 @@ def grayscale_to_rgb(images, name=None):
shape_list = (
[array_ops.ones(rank_1,
dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)])
- multiples = array_ops.concat_v2(shape_list, 0)
+ multiples = array_ops.concat(shape_list, 0)
rgb = array_ops.tile(images, multiples, name=name)
rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
return rgb
@@ -1124,7 +1124,7 @@ def adjust_hue(image, delta, name=None):
# floating point number since delta is [-0.5, 0.5].
hue = math_ops.mod(hue + (delta + 1.), 1.)
- hsv_altered = array_ops.concat_v2([hue, saturation, value], 2)
+ hsv_altered = array_ops.concat([hue, saturation, value], 2)
rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)
else:
rgb_altered = gen_image_ops.adjust_hue(flt_image, delta)
@@ -1210,7 +1210,7 @@ def adjust_saturation(image, saturation_factor, name=None):
saturation *= saturation_factor
saturation = clip_ops.clip_by_value(saturation, 0.0, 1.0)
- hsv_altered = array_ops.concat_v2([hue, saturation, value], 2)
+ hsv_altered = array_ops.concat([hue, saturation, value], 2)
rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)
return convert_image_dtype(rgb_altered, orig_dtype)
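What the altered hunks compute, as a sketch (TensorFlow 1.x assumed): split an HSV image into channels, shift the hue modulo 1, and reassemble the channels along axis 2.

    import tensorflow as tf

    image = tf.placeholder(tf.float32, [None, None, 3])  # RGB in [0, 1]
    hue, saturation, value = tf.split(tf.image.rgb_to_hsv(image), 3, axis=2)
    delta = 0.1
    hue = tf.mod(hue + (delta + 1.), 1.)  # delta may be negative, hence +1
    hsv_altered = tf.concat([hue, saturation, value], 2)
    rgb_altered = tf.image.hsv_to_rgb(hsv_altered)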
diff --git a/tensorflow/python/ops/linalg_grad.py b/tensorflow/python/ops/linalg_grad.py
index 3254d78310..6059c75466 100644
--- a/tensorflow/python/ops/linalg_grad.py
+++ b/tensorflow/python/ops/linalg_grad.py
@@ -50,7 +50,7 @@ def _MatrixDeterminantGrad(op, grad):
c = op.outputs[0]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(
- grad * c, array_ops.concat_v2([array_ops.shape(c), [1, 1]], 0))
+ grad * c, array_ops.concat([array_ops.shape(c), [1, 1]], 0))
return multipliers * a_adj_inv
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index e285f3a32b..d331017f95 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -124,13 +124,13 @@ def eye(num_rows,
diag_size = num_rows
else:
diag_size = math_ops.minimum(num_rows, num_columns)
- diag_shape = array_ops.concat_v2((batch_shape, [diag_size]), 0)
+ diag_shape = array_ops.concat((batch_shape, [diag_size]), 0)
diag_ones = array_ops.ones(diag_shape, dtype=dtype)
if num_columns is None:
return array_ops.matrix_diag(diag_ones)
else:
- shape = array_ops.concat_v2((batch_shape, [num_rows, num_columns]), 0)
+ shape = array_ops.concat((batch_shape, [num_rows, num_columns]), 0)
zero_matrix = array_ops.zeros(shape, dtype=dtype)
return array_ops.matrix_set_diag(zero_matrix, diag_ones)
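How eye assembles a batched, possibly rectangular identity, sketched standalone (TensorFlow 1.x assumed): ones along a diagonal of length min(rows, cols), scattered into a zero matrix.

    import tensorflow as tf

    batch_shape, num_rows, num_columns = [2], 3, 4
    diag_size = min(num_rows, num_columns)
    diag_ones = tf.ones(tf.concat((batch_shape, [diag_size]), 0))
    zeros = tf.zeros(tf.concat((batch_shape, [num_rows, num_columns]), 0))
    identity = tf.matrix_set_diag(zeros, diag_ones)  # [2, 3, 4]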
diff --git a/tensorflow/python/ops/math_grad.py b/tensorflow/python/ops/math_grad.py
index 833ba264e1..6f4473885e 100644
--- a/tensorflow/python/ops/math_grad.py
+++ b/tensorflow/python/ops/math_grad.py
@@ -125,7 +125,7 @@ def _ProdGrad(op, grad):
reduced = math_ops.cast(reduction_indices, dtypes.int32)
idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
other, _ = array_ops.setdiff1d(idx, reduced)
- perm = array_ops.concat_v2([reduced, other], 0)
+ perm = array_ops.concat([reduced, other], 0)
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
permuted = array_ops.transpose(op.inputs[0], perm)
@@ -153,7 +153,7 @@ def _SegmentSumGrad(op, grad):
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
- ones_shape = array_ops.concat_v2([
+ ones_shape = array_ops.concat([
array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
], 0)
diff --git a/tensorflow/python/ops/math_grad_test.py b/tensorflow/python/ops/math_grad_test.py
index 8b0bc6b4f1..1fa15957b0 100644
--- a/tensorflow/python/ops/math_grad_test.py
+++ b/tensorflow/python/ops/math_grad_test.py
@@ -100,14 +100,14 @@ class MinOrMaxGradientTest(test.TestCase):
def testMinGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
- outputs = math_ops.reduce_min(array_ops.concat_v2([inputs, inputs], 0))
+ outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0))
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
def testMaxGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
- outputs = math_ops.reduce_max(array_ops.concat_v2([inputs, inputs], 0))
+ outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0))
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
@@ -135,7 +135,7 @@ class SegmentMinOrMaxGradientTest(test.TestCase):
def testSegmentMinGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
- data = array_ops.concat_v2([inputs, inputs], 0)
+ data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.test_session():
@@ -145,7 +145,7 @@ class SegmentMinOrMaxGradientTest(test.TestCase):
def testSegmentMaxGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
- data = array_ops.concat_v2([inputs, inputs], 0)
+ data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.test_session():
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 8d469e8071..b008742a3f 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -2334,12 +2334,12 @@ def tensordot(a, b, axes, name=None):
axes_dims = array_ops.gather(shape_a, axes)
prod_free_dims = reduce_prod(free_dims)
prod_axes_dims = reduce_prod(axes_dims)
- perm = array_ops.concat_v2([axes_dims, free_dims], 0)
+ perm = array_ops.concat([axes_dims, free_dims], 0)
if flipped:
- perm = array_ops.concat_v2([axes, free], 0)
+ perm = array_ops.concat([axes, free], 0)
new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
else:
- perm = array_ops.concat_v2([free, axes], 0)
+ perm = array_ops.concat([free, axes], 0)
new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims
@@ -2384,5 +2384,4 @@ def tensordot(a, b, axes, name=None):
a_free_dims = ops.convert_to_tensor(a_free_dims)
b_free_dims = ops.convert_to_tensor(b_free_dims)
return array_ops.reshape(
- ab_matmul, array_ops.concat_v2([a_free_dims, b_free_dims], 0),
- name=name)
+ ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
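The reshape being reformatted here implements the tensordot contract: the output shape is the free dims of a followed by the free dims of b. A usage sketch, assuming this snapshot exports the op as tf.tensordot:

    import tensorflow as tf

    a = tf.ones([2, 3, 4])
    b = tf.ones([4, 5])
    # Contract a's last axis with b's first: result shape [2, 3, 5].
    ab = tf.tensordot(a, b, axes=1)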
diff --git a/tensorflow/python/ops/metrics_impl.py b/tensorflow/python/ops/metrics_impl.py
index 392bb5ea18..3de4e1c12c 100644
--- a/tensorflow/python/ops/metrics_impl.py
+++ b/tensorflow/python/ops/metrics_impl.py
@@ -122,7 +122,7 @@ def _maybe_expand_labels(labels, predictions):
array_ops.size(labels.dense_shape) + 1),
lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda
labels,
- shape=array_ops.concat_v2((labels.dense_shape, (1,)), 0),
+ shape=array_ops.concat((labels.dense_shape, (1,)), 0),
name=scope),
lambda: labels)
@@ -2090,7 +2090,7 @@ def _expand_and_tile(tensor, multiple, dim=0, name=None):
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
- expanded_shape = array_ops.concat_v2(
+ expanded_shape = array_ops.concat(
(array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
0,
@@ -2108,7 +2108,7 @@ def _expand_and_tile(tensor, multiple, dim=0, name=None):
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
- tile_multiples = array_ops.concat_v2(
+ tile_multiples = array_ops.concat(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
diff --git a/tensorflow/python/ops/nn_grad.py b/tensorflow/python/ops/nn_grad.py
index a5da0cf0cd..6147cdf221 100644
--- a/tensorflow/python/ops/nn_grad.py
+++ b/tensorflow/python/ops/nn_grad.py
@@ -226,15 +226,15 @@ def _BiasAddGradGrad(op, received_grad):
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
- expanded_shape = array_ops.concat_v2([
- array_ops.ones_like(shape[:-3]), bias_shape, array_ops.ones_like(shape[
- -2:])
+ expanded_shape = array_ops.concat([
+ array_ops.ones_like(shape[:-3]), bias_shape,
+ array_ops.ones_like(shape[-2:])
], 0)
- tile_mults = array_ops.concat_v2([shape[:-3], [1], shape[-2:]], 0)
+ tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
else:
- expanded_shape = array_ops.concat_v2(
+ expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
- tile_mults = array_ops.concat_v2([shape[:-1], [1]], 0)
+ tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
diff --git a/tensorflow/python/ops/nn_impl.py b/tensorflow/python/ops/nn_impl.py
index d8f95d9832..f621bc2d90 100644
--- a/tensorflow/python/ops/nn_impl.py
+++ b/tensorflow/python/ops/nn_impl.py
@@ -952,7 +952,7 @@ def _compute_sampled_logits(weights,
# labels_flat is a [batch_size * num_true] tensor
# sampled is a [num_sampled] int tensor
- all_ids = array_ops.concat_v2([labels_flat, sampled], 0)
+ all_ids = array_ops.concat([labels_flat, sampled], 0)
# weights shape is [num_classes, dim]
all_w = embedding_ops.embedding_lookup(
@@ -968,14 +968,14 @@ def _compute_sampled_logits(weights,
# true_w shape is [batch_size * num_true, dim]
# row_wise_dots is [batch_size, num_true, dim]
dim = array_ops.shape(true_w)[1:2]
- new_true_w_shape = array_ops.concat_v2([[-1, num_true], dim], 0)
+ new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
row_wise_dots = math_ops.multiply(
array_ops.expand_dims(inputs, 1),
array_ops.reshape(true_w, new_true_w_shape))
# We want the row-wise dot plus biases which yields a
# [batch_size, num_true] tensor of true_logits.
dots_as_matrix = array_ops.reshape(row_wise_dots,
- array_ops.concat_v2([[-1], dim], 0))
+ array_ops.concat([[-1], dim], 0))
true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
true_b = array_ops.reshape(true_b, [-1, num_true])
true_logits += true_b
@@ -1003,10 +1003,10 @@ def _compute_sampled_logits(weights,
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
acc_ids_2d_int32 = array_ops.reshape(
math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
- sparse_indices = array_ops.concat_v2([acc_indices_2d, acc_ids_2d_int32],
- 1, "sparse_indices")
+ sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1,
+ "sparse_indices")
# Create sampled_logits_shape = [batch_size, num_sampled]
- sampled_logits_shape = array_ops.concat_v2(
+ sampled_logits_shape = array_ops.concat(
[array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)],
0)
if sampled_logits.dtype != acc_weights.dtype:
@@ -1024,11 +1024,11 @@ def _compute_sampled_logits(weights,
sampled_logits -= math_ops.log(sampled_expected_count)
# Construct output logits and labels. The true labels/logits start at col 0.
- out_logits = array_ops.concat_v2([true_logits, sampled_logits], 1)
+ out_logits = array_ops.concat([true_logits, sampled_logits], 1)
# true_logits is a float tensor, ones_like(true_logits) is a float tensor
# of ones. We then divide by num_true to ensure the per-example labels sum
# to 1.0, i.e. form a proper probability distribution.
- out_labels = array_ops.concat_v2([
+ out_labels = array_ops.concat([
array_ops.ones_like(true_logits) / num_true,
array_ops.zeros_like(sampled_logits)
], 1)
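The layout produced by the final two concats above, in miniature (TensorFlow 1.x assumed): true logits occupy the leading columns, sampled logits the rest, and each label row forms a proper distribution.

    import tensorflow as tf

    true_logits = tf.constant([[0.5], [1.2]])               # [batch, num_true]
    sampled_logits = tf.constant([[0.1, 0.2], [0.3, 0.4]])  # [batch, num_sampled]
    num_true = 1
    out_logits = tf.concat([true_logits, sampled_logits], 1)  # [batch, 3]
    # 1/num_true over the true columns, 0 over the sampled ones.
    out_labels = tf.concat([
        tf.ones_like(true_logits) / num_true,
        tf.zeros_like(sampled_logits)
    ], 1)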
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index f5075f0675..9ad2bf998b 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -407,7 +407,7 @@ def with_space_to_batch(input, dilation_rate, padding, op, filter_shape=None, #
if const_orig is not None:
return np.concatenate(parts)
else:
- return array_ops.concat_v2(parts, 0)
+ return array_ops.concat(parts, 0)
dilation_rate = adjust(dilation_rate, 1)
paddings = adjust(paddings, 0)
@@ -1362,7 +1362,7 @@ def crelu(features, name=None):
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
- c = array_ops.concat_v2([features, -features], -1, name=name)
+ c = array_ops.concat([features, -features], -1, name=name)
return gen_nn_ops.relu(c)
@@ -1387,8 +1387,7 @@ def _flatten_outer_dims(logits):
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
- output = array_ops.reshape(logits,
- array_ops.concat_v2([[-1], last_dim_size], 0))
+ output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
# Set output shape if known.
shape = logits.get_shape()
@@ -1433,7 +1432,7 @@ def _softmax(logits, compute_op, dim=-1, name=None):
def _swap_axis(logits, dim_index, last_index):
"""Swaps logits's dim_index and last_index."""
return array_ops.transpose(logits,
- array_ops.concat_v2([
+ array_ops.concat([
math_ops.range(dim_index), [last_index],
math_ops.range(dim_index + 1, last_index),
[dim_index]
@@ -1596,7 +1595,7 @@ def softmax_cross_entropy_with_logits(_sentinel=None, # pylint: disable=invalid
if dim != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(tensor,
- array_ops.concat_v2([
+ array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank),
[dim_index]
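A standalone version of the _move_dim_to_end helper above (TensorFlow 1.x assumed): it builds a permutation by concatenating index ranges, rotating dim_index to the last position.

    import tensorflow as tf

    def move_dim_to_end(tensor, dim_index, rank):
        # perm = [0..dim_index) ++ (dim_index..rank) ++ [dim_index]
        return tf.transpose(tensor,
                            tf.concat([
                                tf.range(dim_index),
                                tf.range(dim_index + 1, rank),
                                [dim_index]
                            ], 0))

    x = tf.ones([2, 3, 4])
    y = move_dim_to_end(x, 1, 3)  # shape becomes [2, 4, 3]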
diff --git a/tensorflow/python/ops/random_ops.py b/tensorflow/python/ops/random_ops.py
index a1ef588502..34b4d36102 100644
--- a/tensorflow/python/ops/random_ops.py
+++ b/tensorflow/python/ops/random_ops.py
@@ -420,7 +420,7 @@ def random_gamma(shape,
name: Optional name for the operation.
Returns:
- samples: a `Tensor` of shape `tf.concat_v2(shape, tf.shape(alpha + beta))`
+ samples: a `Tensor` of shape `tf.concat([shape, tf.shape(alpha + beta)], 0)`
with values of type `dtype`.
"""
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index df9dcedbfa..06ae3589a2 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -325,7 +325,7 @@ def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
- `tf.concat_v2(outputs, 2)`.
+ `tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
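A usage sketch of the documented pattern, assuming a snapshot where the cell classes live under tf.contrib.rnn:

    import tensorflow as tf

    inputs = tf.placeholder(tf.float32, [None, 20, 16])  # [batch, time, depth]
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(
        tf.contrib.rnn.BasicLSTMCell(8),
        tf.contrib.rnn.BasicLSTMCell(8),
        inputs, dtype=tf.float32)
    # Merge the (output_fw, output_bw) tuple along the depth axis.
    merged = tf.concat(outputs, 2)  # [batch, 20, 16]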
diff --git a/tensorflow/python/ops/sparse_grad.py b/tensorflow/python/ops/sparse_grad.py
index bdf512cc11..fa015856ce 100644
--- a/tensorflow/python/ops/sparse_grad.py
+++ b/tensorflow/python/ops/sparse_grad.py
@@ -191,13 +191,14 @@ def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
y_shape = math_ops.to_int64(array_ops.shape(y))
num_added_dims = array_ops.expand_dims(
array_ops.size(x_shape) - array_ops.size(y_shape), 0)
- augmented_y_shape = array_ops.concat_v2(
+ augmented_y_shape = array_ops.concat(
[array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)
scaling = x_shape // augmented_y_shape
scaled_indices = x_indices // scaling
- scaled_indices = array_ops.slice(
- scaled_indices, array_ops.concat_v2([[0], num_added_dims], 0), [-1, -1])
+ scaled_indices = array_ops.slice(scaled_indices,
+ array_ops.concat([[0], num_added_dims], 0),
+ [-1, -1])
dense_vals = array_ops.gather_nd(y, scaled_indices)
if is_mul:
diff --git a/tensorflow/python/ops/sparse_ops.py b/tensorflow/python/ops/sparse_ops.py
index dd00a52a32..0fff2d3ba5 100644
--- a/tensorflow/python/ops/sparse_ops.py
+++ b/tensorflow/python/ops/sparse_ops.py
@@ -228,10 +228,10 @@ def sparse_concat(axis,
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
- array_ops.concat_v2(
+ array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
- array_ops.concat_v2([
+ array_ops.concat([
max_shape[:axis], shape[-1:] if axis == -1 else
shape[axis:axis + 1], [] if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
@@ -856,11 +856,11 @@ def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
# Slice off the last dimension of indices, then tack on the ids
indices_columns_to_preserve = array_ops.slice(
sp_ids.indices, [0, 0], array_ops.stack([-1, rank - 1]))
- new_indices = array_ops.concat_v2(
+ new_indices = array_ops.concat(
[indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)
new_values = sp_values.values
- new_shape = array_ops.concat_v2([
+ new_shape = array_ops.concat([
array_ops.slice(sp_ids.dense_shape, [0],
array_ops.expand_dims(rank - 1, 0)),
math_ops.cast(array_ops.stack([vocab_size]), dtypes.int64)
@@ -1060,16 +1060,16 @@ def sparse_fill_empty_rows(sp_input, default_value, name=None):
False)
empty_row_indices_as_column = array_ops.reshape(empty_row_indices, [-1, 1])
- additional_indices = array_ops.concat_v2([
+ additional_indices = array_ops.concat([
empty_row_indices_as_column,
array_ops.zeros_like(empty_row_indices_as_column)
], 1)
additional_values = array_ops.fill(
array_ops.shape(empty_row_indices), default_value)
- all_indices_unordered = array_ops.concat_v2(
+ all_indices_unordered = array_ops.concat(
[sp_input.indices, additional_indices], 0)
- all_values_unordered = array_ops.concat_v2(
+ all_values_unordered = array_ops.concat(
[sp_input.values, additional_values], 0)
sp_unordered_output = sparse_tensor.SparseTensor(
all_indices_unordered,
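What the concatenations above construct, shown end to end (TensorFlow 1.x assumed): every empty row gains a single default entry at column 0, appended to the existing indices and values and then reordered.

    import tensorflow as tf

    sp = tf.SparseTensor(indices=[[0, 0], [2, 1]], values=[1, 2],
                         dense_shape=[4, 3])
    # Rows 1 and 3 are empty and receive default_value at column 0.
    filled, empty_row_indicator = tf.sparse_fill_empty_rows(sp, default_value=-1)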
diff --git a/tensorflow/python/ops/standard_ops.py b/tensorflow/python/ops/standard_ops.py
index e6fa167e23..3bc8048707 100644
--- a/tensorflow/python/ops/standard_ops.py
+++ b/tensorflow/python/ops/standard_ops.py
@@ -168,15 +168,15 @@ _allowed_symbols_array_ops = [
## documentation section to reference.
## For re-exporting to tf.*:
"constant",
- "edit_distance", # to-doc
+ "edit_distance", # to-doc
# From gen_array_ops:
- "copy_host", # to-doc
+ "copy_host", # to-doc
"immutable_const", # to-doc
"invert_permutation", # to-doc
"quantize_and_dequantize", # to-doc
# TODO(drpng): legacy symbols to be removed.
- "list_diff", # Use tf.listdiff instead.
+ "list_diff", # Use tf.listdiff instead.
"batch_matrix_diag",
"batch_matrix_band_part",
"batch_matrix_diag_part",
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index 97a16c1871..b9f41f80e2 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -1014,7 +1014,7 @@ class PartitionedVariable(object):
partition_ix = partition_axes[0]
with ops.name_scope(self._name + "/ConcatPartitions/"):
- concatenated = array_ops.concat_v2(self._variable_list, partition_ix)
+ concatenated = array_ops.concat(self._variable_list, partition_ix)
with ops.name_scope(None):
return array_ops.identity(concatenated, name=self._name)
diff --git a/tensorflow/python/training/monitored_session.py b/tensorflow/python/training/monitored_session.py
index b2e30bceda..c561ad39e4 100644
--- a/tensorflow/python/training/monitored_session.py
+++ b/tensorflow/python/training/monitored_session.py
@@ -150,7 +150,7 @@ class Scaffold(object):
default_init_op)
if self._ready_op is None:
def default_ready_op():
- return array_ops.concat_v2([
+ return array_ops.concat([
variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()
], 0)
diff --git a/tensorflow/python/training/saver_test.py b/tensorflow/python/training/saver_test.py
index 2bde726b45..03d1c06476 100644
--- a/tensorflow/python/training/saver_test.py
+++ b/tensorflow/python/training/saver_test.py
@@ -766,9 +766,9 @@ class SaveRestoreShardedTest(test.TestCase):
if partitioner:
return new_vs[0].as_tensor().eval()
elif slices and slices[0] != 1:
- return array_ops.concat_v2(new_vs, 0).eval()
+ return array_ops.concat(new_vs, 0).eval()
elif slices and slices[1] != 1:
- return array_ops.concat_v2(new_vs, 1).eval()
+ return array_ops.concat(new_vs, 1).eval()
else: # Non-sliced.
return new_vs[0].eval()
@@ -1609,7 +1609,7 @@ class MetaGraphTest(test.TestCase):
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
- concated = array_ops.concat_v2([indices, labels], 1)
+ concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
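The one-hot construction used in this test, as a standalone sketch (TensorFlow 1.x assumed): pair each example index with its label to form coordinates, then scatter 1.0 into a dense [batch, 10] matrix.

    import tensorflow as tf

    labels = tf.constant([3, 1, 4])
    batch_size = tf.size(labels)
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    # Each row of `concated` is an (example, class) coordinate pair.
    concated = tf.concat([indices, tf.expand_dims(labels, 1)], 1)
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 10]), 1.0, 0.0)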